Merge branch 'master' into index-lifecycle
commit b1257d873b
@@ -38,7 +38,7 @@ remote clusters that should be connected to, for instance:
 [source,yaml]
 --------------------------------
-search:
+cluster:
     remote:
         cluster_one: <1>
             seeds: 127.0.0.1:9300

@@ -58,7 +58,7 @@ following:
 PUT _cluster/settings
 {
   "persistent": {
-    "search": {
+    "cluster": {
       "remote": {
         "cluster_one": {
           "seeds": [

@@ -94,7 +94,7 @@ because we'll use them later.
 {
   "acknowledged" : true,
   "persistent": {
-    "search": {
+    "cluster": {
       "remote": {
         "cluster_one": {
           "seeds": [

@@ -129,7 +129,7 @@ A remote cluster can be deleted from the cluster settings by setting its seeds t
 PUT _cluster/settings
 {
   "persistent": {
-    "search": {
+    "cluster": {
       "remote": {
         "cluster_three": {
           "seeds": null <1>

@@ -309,7 +309,7 @@ boolean `skip_unavailable` setting, set to `false` by default.
 PUT _cluster/settings
 {
   "persistent": {
-    "search.remote.cluster_two.skip_unavailable": true <1>
+    "cluster.remote.cluster_two.skip_unavailable": true <1>
   }
 }
 --------------------------------

@@ -391,30 +391,30 @@ GET /cluster_one:twitter,cluster_two:twitter,twitter/_search <1>
 [[cross-cluster-search-settings]]
 === Cross cluster search settings

-`search.remote.connections_per_cluster`::
+`cluster.remote.connections_per_cluster`::

     The number of nodes to connect to per remote cluster. The default is `3`.

-`search.remote.initial_connect_timeout`::
+`cluster.remote.initial_connect_timeout`::

     The time to wait for remote connections to be established when the node starts. The default is `30s`.

-`search.remote.node.attr`::
+`cluster.remote.node.attr`::

     A node attribute to filter out nodes that are eligible as a gateway node in
     the remote cluster. For instance a node can have a node attribute
     `node.attr.gateway: true` such that only nodes with this attribute will be
-    connected to if `search.remote.node.attr` is set to `gateway`.
+    connected to if `cluster.remote.node.attr` is set to `gateway`.

-`search.remote.connect`::
+`cluster.remote.connect`::

     By default, any node in the cluster can act as a cross-cluster client and
-    connect to remote clusters. The `search.remote.connect` setting can be set
+    connect to remote clusters. The `cluster.remote.connect` setting can be set
     to `false` (defaults to `true`) to prevent certain nodes from connecting to
     remote clusters. Cross-cluster search requests must be sent to a node that
     is allowed to act as a cross-cluster client.

-`search.remote.${cluster_alias}.skip_unavailable`::
+`cluster.remote.${cluster_alias}.skip_unavailable`::

     Per cluster boolean setting that allows to skip specific clusters when no
     nodes belonging to them are available and they are searched as part of a
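
The rename in this file is mechanical: every `search.remote.*` key becomes `cluster.remote.*`. As a hedged illustration (not part of this commit), the renamed keys could also be assembled programmatically with the standard `Settings` builder; the key strings below mirror the documentation hunks above.

[source,java]
--------------------------------
import org.elasticsearch.common.settings.Settings;

// Illustrative sketch only: the renamed cluster.remote.* keys built with the
// standard Settings builder; values follow the documentation above.
Settings remoteClusterSettings = Settings.builder()
    .put("cluster.remote.cluster_one.seeds", "127.0.0.1:9300")
    .put("cluster.remote.cluster_two.skip_unavailable", true)
    .put("cluster.remote.connect", false)
    .build();
--------------------------------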
@@ -59,7 +59,7 @@ To create a dedicated ingest node when {xpack} is installed, set:
 node.master: false <1>
 node.data: false <2>
 node.ingest: true <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 node.ml: false <5>
 -------------------
 <1> Disable the `node.master` role (enabled by default).

@@ -75,7 +75,7 @@ To create a dedicated coordinating node when {xpack} is installed, set:
 node.master: false <1>
 node.data: false <2>
 node.ingest: false <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 node.ml: false <5>
 -------------------
 <1> Disable the `node.master` role (enabled by default).

@@ -105,7 +105,7 @@ To create a dedicated {ml} node, set:
 node.master: false <1>
 node.data: false <2>
 node.ingest: false <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 node.ml: true <5>
 xpack.ml.enabled: true <6>
 -------------------
@@ -93,7 +93,7 @@ To create a dedicated master-eligible node, set:
 node.master: true <1>
 node.data: false <2>
 node.ingest: false <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 -------------------
 <1> The `node.master` role is enabled by default.
 <2> Disable the `node.data` role (enabled by default).

@@ -192,7 +192,7 @@ To create a dedicated data node, set:
 node.master: false <1>
 node.data: true <2>
 node.ingest: false <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 -------------------
 <1> Disable the `node.master` role (enabled by default).
 <2> The `node.data` role is enabled by default.

@@ -220,7 +220,7 @@ To create a dedicated ingest node, set:
 node.master: false <1>
 node.data: false <2>
 node.ingest: true <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 -------------------
 <1> Disable the `node.master` role (enabled by default).
 <2> Disable the `node.data` role (enabled by default).

@@ -260,7 +260,7 @@ To create a dedicated coordinating node, set:
 node.master: false <1>
 node.data: false <2>
 node.ingest: false <3>
-search.remote.connect: false <4>
+cluster.remote.connect: false <4>
 -------------------
 <1> Disable the `node.master` role (enabled by default).
 <2> Disable the `node.data` role (enabled by default).
@@ -113,4 +113,20 @@ public class PipelineProcessorTests extends ESTestCase {
             "Recursive invocation of pipeline [inner] detected.", e.getRootCause().getMessage()
         );
     }
+
+    public void testAllowsRepeatedPipelineInvocations() throws Exception {
+        String innerPipelineId = "inner";
+        IngestService ingestService = mock(IngestService.class);
+        IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
+        Map<String, Object> outerConfig = new HashMap<>();
+        outerConfig.put("pipeline", innerPipelineId);
+        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);
+        Pipeline inner = new Pipeline(
+            innerPipelineId, null, null, new CompoundProcessor()
+        );
+        when(ingestService.getPipeline(innerPipelineId)).thenReturn(inner);
+        Processor outerProc = factory.create(Collections.emptyMap(), null, outerConfig);
+        outerProc.execute(testIngestDocument);
+        outerProc.execute(testIngestDocument);
+    }
 }
@@ -19,21 +19,9 @@

 package org.elasticsearch.transport.netty4;

-import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.logging.LogLevel;
 import io.netty.handler.logging.LoggingHandler;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.compress.Compressor;
-import org.elasticsearch.common.compress.CompressorFactory;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.transport.TcpHeader;
-import org.elasticsearch.transport.TcpTransport;
-import org.elasticsearch.transport.TransportStatus;
-
-import java.io.IOException;

 final class ESLoggingHandler extends LoggingHandler {

@@ -42,92 +30,8 @@ final class ESLoggingHandler extends LoggingHandler {
     }

     @Override
-    protected String format(final ChannelHandlerContext ctx, final String eventName, final Object arg) {
-        if (arg instanceof ByteBuf) {
-            try {
-                return format(ctx, eventName, (ByteBuf) arg);
-            } catch (final Exception e) {
-                // we really do not want to allow a bug in the formatting handling to escape
-                logger.trace("an exception occurred formatting a trace message", e);
-                // we are going to let this be formatted via the default formatting
-                return super.format(ctx, eventName, arg);
-            }
-        } else {
-            return super.format(ctx, eventName, arg);
-        }
+    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
+        // We do not want to log read complete events because we log inbound messages in the TcpTransport.
+        ctx.fireChannelReadComplete();
     }
-
-    private static final int MESSAGE_LENGTH_OFFSET = TcpHeader.MARKER_BYTES_SIZE;
-    private static final int REQUEST_ID_OFFSET = MESSAGE_LENGTH_OFFSET + TcpHeader.MESSAGE_LENGTH_SIZE;
-    private static final int STATUS_OFFSET = REQUEST_ID_OFFSET + TcpHeader.REQUEST_ID_SIZE;
-    private static final int VERSION_ID_OFFSET = STATUS_OFFSET + TcpHeader.STATUS_SIZE;
-    private static final int ACTION_OFFSET = VERSION_ID_OFFSET + TcpHeader.VERSION_ID_SIZE;
-
-    private String format(final ChannelHandlerContext ctx, final String eventName, final ByteBuf arg) throws IOException {
-        final int readableBytes = arg.readableBytes();
-        if (readableBytes == 0) {
-            return super.format(ctx, eventName, arg);
-        } else if (readableBytes >= 2) {
-            final StringBuilder sb = new StringBuilder();
-            sb.append(ctx.channel().toString());
-            final int offset = arg.readerIndex();
-            // this might be an ES message, check the header
-            if (arg.getByte(offset) == (byte) 'E' && arg.getByte(offset + 1) == (byte) 'S') {
-                if (readableBytes == TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE) {
-                    final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
-                    if (length == TcpTransport.PING_DATA_SIZE) {
-                        sb.append(" [ping]").append(' ').append(eventName).append(": ").append(readableBytes).append('B');
-                        return sb.toString();
-                    }
-                }
-                else if (readableBytes >= TcpHeader.HEADER_SIZE) {
-                    // we are going to try to decode this as an ES message
-                    final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
-                    final long requestId = arg.getLong(offset + REQUEST_ID_OFFSET);
-                    final byte status = arg.getByte(offset + STATUS_OFFSET);
-                    final boolean isRequest = TransportStatus.isRequest(status);
-                    final String type = isRequest ? "request" : "response";
-                    final String version = Version.fromId(arg.getInt(offset + VERSION_ID_OFFSET)).toString();
-                    sb.append(" [length: ").append(length);
-                    sb.append(", request id: ").append(requestId);
-                    sb.append(", type: ").append(type);
-                    sb.append(", version: ").append(version);
-                    if (isRequest) {
-                        // it looks like an ES request, try to decode the action
-                        final int remaining = readableBytes - ACTION_OFFSET;
-                        final ByteBuf slice = arg.slice(offset + ACTION_OFFSET, remaining);
-                        // the stream might be compressed
-                        try (StreamInput in = in(status, slice, remaining)) {
-                            // the first bytes in the message is the context headers
-                            try (ThreadContext context = new ThreadContext(Settings.EMPTY)) {
-                                context.readHeaders(in);
-                            }
-                            // now we decode the features
-                            if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
-                                in.readStringArray();
-                            }
-                            // now we can decode the action name
-                            sb.append(", action: ").append(in.readString());
-                        }
-                    }
-                    sb.append(']');
-                    sb.append(' ').append(eventName).append(": ").append(readableBytes).append('B');
-                    return sb.toString();
-                }
-            }
-        }
-        // we could not decode this as an ES message, use the default formatting
-        return super.format(ctx, eventName, arg);
-    }
-
-    private StreamInput in(final Byte status, final ByteBuf slice, final int remaining) throws IOException {
-        final ByteBufStreamInput in = new ByteBufStreamInput(slice, remaining);
-        if (TransportStatus.isCompress(status)) {
-            final Compressor compressor = CompressorFactory.compressor(Netty4Utils.toBytesReference(slice));
-            return compressor.streamInput(in);
-        } else {
-            return in;
-        }
-    }
-
 }
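
With the payload formatting moved out of this handler (to `TransportLogger`, per the test changes below), ESLoggingHandler keeps only the suppression of read-complete noise. A minimal standalone sketch of that Netty idiom; the subclass name and log level here are illustrative, not from the commit:

[source,java]
--------------------------------
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;

// Illustrative only: a LoggingHandler subclass that forwards READ_COMPLETE
// events without logging them, as the trimmed handler above does.
final class QuietReadCompleteLoggingHandler extends LoggingHandler {

    QuietReadCompleteLoggingHandler() {
        super(LogLevel.TRACE);
    }

    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
        // skip super.channelReadComplete(ctx), which would log the event
        ctx.fireChannelReadComplete();
    }
}
--------------------------------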
@@ -26,8 +26,6 @@ import io.netty.channel.ChannelHandlerContext;
 import io.netty.util.Attribute;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.transport.TcpHeader;
 import org.elasticsearch.transport.Transports;



@@ -46,23 +44,15 @@ final class Netty4MessageChannelHandler extends ChannelDuplexHandler {
     @Override
     public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
         Transports.assertTransportThread();
-        if (!(msg instanceof ByteBuf)) {
-            ctx.fireChannelRead(msg);
-            return;
-        }
+        assert msg instanceof ByteBuf : "Expected message type ByteBuf, found: " + msg.getClass();
+
         final ByteBuf buffer = (ByteBuf) msg;
-        final int remainingMessageSize = buffer.getInt(buffer.readerIndex() - TcpHeader.MESSAGE_LENGTH_SIZE);
-        final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize;
         try {
             Channel channel = ctx.channel();
-            // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh
-            // buffer, or in the cumulative buffer, which is cleaned each time so it could be bigger than the actual size
-            BytesReference reference = Netty4Utils.toBytesReference(buffer, remainingMessageSize);
             Attribute<Netty4TcpChannel> channelAttribute = channel.attr(Netty4Transport.CHANNEL_KEY);
-            transport.messageReceived(reference, channelAttribute.get());
+            transport.inboundMessage(channelAttribute.get(), Netty4Utils.toBytesReference(buffer));
         } finally {
-            // Set the expected position of the buffer, no matter what happened
-            buffer.readerIndex(expectedReaderIndex);
             buffer.release();
         }
     }

@@ -23,7 +23,6 @@ import io.netty.buffer.ByteBuf;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.handler.codec.ByteToMessageDecoder;
 import io.netty.handler.codec.TooLongFrameException;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.transport.TcpHeader;
 import org.elasticsearch.transport.TcpTransport;

@@ -36,17 +35,20 @@ final class Netty4SizeHeaderFrameDecoder extends ByteToMessageDecoder {
     @Override
     protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
         try {
-            BytesReference networkBytes = Netty4Utils.toBytesReference(in);
-            int messageLength = TcpTransport.readMessageLength(networkBytes);
-            // If the message length is -1, we have not read a complete header.
-            if (messageLength != -1) {
-                int messageLengthWithHeader = messageLength + HEADER_SIZE;
-                // If the message length is greater than the network bytes available, we have not read a complete frame.
-                if (messageLengthWithHeader <= networkBytes.length()) {
-                    final ByteBuf message = in.skipBytes(HEADER_SIZE);
-                    // 6 bytes would mean it is a ping. And we should ignore.
-                    if (messageLengthWithHeader != 6) {
+            boolean continueDecode = true;
+            while (continueDecode) {
+                int messageLength = TcpTransport.readMessageLength(Netty4Utils.toBytesReference(in));
+                if (messageLength == -1) {
+                    continueDecode = false;
+                } else {
+                    int messageLengthWithHeader = messageLength + HEADER_SIZE;
+                    // If the message length is greater than the network bytes available, we have not read a complete frame.
+                    if (messageLengthWithHeader > in.readableBytes()) {
+                        continueDecode = false;
+                    } else {
+                        final ByteBuf message = in.retainedSlice(in.readerIndex() + HEADER_SIZE, messageLength);
+                        out.add(message);
+                        in.readerIndex(in.readerIndex() + messageLengthWithHeader);
+                    }
+                }
+            }

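
The rewritten decode() drains every complete frame available instead of emitting at most one frame per call. A minimal standalone sketch of the same length-prefixed framing loop (not the Elasticsearch class itself; it assumes the transport's 6-byte prefix of 2 marker bytes plus a 4-byte big-endian length, matching HEADER_SIZE in the hunk above, and ignores the ping special case that readMessageLength handles):

[source,java]
--------------------------------
import java.util.List;

import io.netty.buffer.ByteBuf;

final class FramingSketch {

    private static final int HEADER_SIZE = 6; // 2 marker bytes + 4-byte length

    // Slice complete frames off the buffer until only a partial frame remains.
    static void decodeFrames(ByteBuf in, List<Object> out) {
        while (in.readableBytes() >= HEADER_SIZE) {
            int messageLength = in.getInt(in.readerIndex() + 2); // skip the marker bytes
            int messageLengthWithHeader = messageLength + HEADER_SIZE;
            if (messageLengthWithHeader > in.readableBytes()) {
                return; // incomplete frame; wait for more bytes
            }
            // retainedSlice avoids copying; then advance past the whole frame
            out.add(in.retainedSlice(in.readerIndex() + HEADER_SIZE, messageLength));
            in.readerIndex(in.readerIndex() + messageLengthWithHeader);
        }
    }
}
--------------------------------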
@@ -97,12 +97,12 @@ public class Netty4Transport extends TcpTransport {
         intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope);


-    protected final RecvByteBufAllocator recvByteBufAllocator;
-    protected final int workerCount;
-    protected final ByteSizeValue receivePredictorMin;
-    protected final ByteSizeValue receivePredictorMax;
-    protected volatile Bootstrap bootstrap;
-    protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
+    private final RecvByteBufAllocator recvByteBufAllocator;
+    private final int workerCount;
+    private final ByteSizeValue receivePredictorMin;
+    private final ByteSizeValue receivePredictorMax;
+    private volatile Bootstrap clientBootstrap;
+    private final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();

     public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
                            NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {

@@ -125,7 +125,7 @@ public class Netty4Transport extends TcpTransport {
     protected void doStart() {
         boolean success = false;
         try {
-            bootstrap = createBootstrap();
+            clientBootstrap = createClientBootstrap();
             if (NetworkService.NETWORK_SERVER.get(settings)) {
                 for (ProfileSettings profileSettings : profileSettings) {
                     createServerBootstrap(profileSettings);

@@ -141,13 +141,11 @@ public class Netty4Transport extends TcpTransport {
         }
     }

-    private Bootstrap createBootstrap() {
+    private Bootstrap createClientBootstrap() {
         final Bootstrap bootstrap = new Bootstrap();
         bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)));
         bootstrap.channel(NioSocketChannel.class);

-        bootstrap.handler(getClientChannelInitializer());
-
         bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings));
         bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));

@@ -166,8 +164,6 @@ public class Netty4Transport extends TcpTransport {
         final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
         bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);

-        bootstrap.validate();
-
         return bootstrap;
     }

@@ -216,7 +212,7 @@ public class Netty4Transport extends TcpTransport {
         return new ServerChannelInitializer(name);
     }

-    protected ChannelHandler getClientChannelInitializer() {
+    protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) {
         return new ClientChannelInitializer();
     }

@@ -226,7 +222,11 @@ public class Netty4Transport extends TcpTransport {
     @Override
     protected Netty4TcpChannel initiateChannel(DiscoveryNode node, ActionListener<Void> listener) throws IOException {
         InetSocketAddress address = node.getAddress().address();
-        ChannelFuture channelFuture = bootstrap.connect(address);
+        Bootstrap bootstrapWithHandler = clientBootstrap.clone();
+        bootstrapWithHandler.handler(getClientChannelInitializer(node));
+        bootstrapWithHandler.remoteAddress(address);
+        ChannelFuture channelFuture = bootstrapWithHandler.connect();
+
         Channel channel = channelFuture.channel();
         if (channel == null) {
             ExceptionsHelper.maybeDieOnAnotherThread(channelFuture.cause());

@@ -289,9 +289,9 @@ public class Netty4Transport extends TcpTransport {
             }
             serverBootstraps.clear();

-            if (bootstrap != null) {
-                bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
-                bootstrap = null;
+            if (clientBootstrap != null) {
+                clientBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
+                clientBootstrap = null;
             }
         });
     }
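
The key change above is from a single shared bootstrap with a fixed handler to cloning clientBootstrap per connection, so each outbound channel can get a node-specific initializer. A hedged sketch of the underlying Netty pattern (plain Netty; the method and handler here are illustrative, not the Elasticsearch code):

[source,java]
--------------------------------
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.handler.logging.LoggingHandler;

final class BootstrapCloneSketch {

    // clone() shares the template's event loop group, channel type and
    // options; only the handler and remote address are set per connection.
    static ChannelFuture connectWithClone(Bootstrap template, String host, int port) {
        Bootstrap perConnection = template.clone();
        perConnection.handler(new LoggingHandler()); // a node-specific ChannelInitializer in the real code
        perConnection.remoteAddress(host, port);
        return perConnection.connect();
    }
}
--------------------------------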
@@ -156,5 +156,4 @@ public class Netty4Utils {
             throw closingExceptions;
         }
     }
-
 }
@@ -26,9 +26,10 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.transport.TransportLogger;

 @ESIntegTestCase.ClusterScope(numDataNodes = 2)
-@TestLogging(value = "org.elasticsearch.transport.netty4.ESLoggingHandler:trace")
+@TestLogging(value = "org.elasticsearch.transport.netty4.ESLoggingHandler:trace,org.elasticsearch.transport.TransportLogger:trace")
 public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {

     private MockLogAppender appender;

@@ -37,11 +38,13 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
         super.setUp();
         appender = new MockLogAppender();
         Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
+        Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender);
         appender.start();
     }

     public void tearDown() throws Exception {
         Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
+        Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender);
         appender.stop();
         super.tearDown();
     }

@@ -56,7 +59,7 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
                 " WRITE: \\d+B";
         final MockLogAppender.LoggingExpectation writeExpectation =
             new MockLogAppender.PatternSeenEventExcpectation(
-                "hot threads request", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, writePattern);
+                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern);

         final MockLogAppender.LoggingExpectation flushExpectation =
             new MockLogAppender.SeenEventExpectation("flush", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, "*FLUSH*");

@@ -71,7 +74,7 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {

         final MockLogAppender.LoggingExpectation readExpectation =
             new MockLogAppender.PatternSeenEventExcpectation(
-                "hot threads request", ESLoggingHandler.class.getCanonicalName(), Level.TRACE, readPattern);
+                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern);

         appender.addExpectation(writeExpectation);
         appender.addExpectation(flushExpectation);
@@ -1,46 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.ingest.geoip;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.maxmind.db.NodeCache;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.cache.Cache;
-import org.elasticsearch.common.cache.CacheBuilder;
-
-import java.io.IOException;
-import java.util.concurrent.ExecutionException;
-
-final class GeoIpCache implements NodeCache {
-    private final Cache<Integer, JsonNode> cache;
-
-    GeoIpCache(long maxSize) {
-        this.cache = CacheBuilder.<Integer, JsonNode>builder().setMaximumWeight(maxSize).build();
-    }
-
-    @Override
-    public JsonNode get(int key, Loader loader) throws IOException {
-        try {
-            return cache.computeIfAbsent(key, loader::load);
-        } catch (ExecutionException e) {
-            Throwable cause = e.getCause() != null ? e.getCause() : e;
-            throw new ElasticsearchException(cause);
-        }
-    }
-}
@@ -36,6 +36,7 @@ import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.ingest.AbstractProcessor;
 import org.elasticsearch.ingest.IngestDocument;
 import org.elasticsearch.ingest.Processor;
+import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;

 import java.net.InetAddress;
 import java.security.AccessController;

@@ -66,14 +67,18 @@ public final class GeoIpProcessor extends AbstractProcessor {
     private final DatabaseReader dbReader;
     private final Set<Property> properties;
     private final boolean ignoreMissing;
+    private final GeoIpCache cache;

-    GeoIpProcessor(String tag, String field, DatabaseReader dbReader, String targetField, Set<Property> properties, boolean ignoreMissing) {
+
+    GeoIpProcessor(String tag, String field, DatabaseReader dbReader, String targetField, Set<Property> properties, boolean ignoreMissing,
+                   GeoIpCache cache) {
         super(tag);
         this.field = field;
         this.targetField = targetField;
         this.dbReader = dbReader;
         this.properties = properties;
         this.ignoreMissing = ignoreMissing;
+        this.cache = cache;
     }

     boolean isIgnoreMissing() {

@@ -146,15 +151,16 @@ public final class GeoIpProcessor extends AbstractProcessor {

     private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) {
         SpecialPermission.check();
-        CityResponse response = AccessController.doPrivileged((PrivilegedAction<CityResponse>) () -> {
-            try {
-                return dbReader.city(ipAddress);
-            } catch (AddressNotFoundException e) {
-                throw new AddressNotFoundRuntimeException(e);
-            } catch (Exception e) {
-                throw new RuntimeException(e);
-            }
-        });
+        CityResponse response = AccessController.doPrivileged((PrivilegedAction<CityResponse>) () ->
+            cache.putIfAbsent(ipAddress, CityResponse.class, ip -> {
+                try {
+                    return dbReader.city(ip);
+                } catch (AddressNotFoundException e) {
+                    throw new AddressNotFoundRuntimeException(e);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            }));

         Country country = response.getCountry();
         City city = response.getCity();

@@ -231,15 +237,16 @@ public final class GeoIpProcessor extends AbstractProcessor {

     private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) {
         SpecialPermission.check();
-        CountryResponse response = AccessController.doPrivileged((PrivilegedAction<CountryResponse>) () -> {
-            try {
-                return dbReader.country(ipAddress);
-            } catch (AddressNotFoundException e) {
-                throw new AddressNotFoundRuntimeException(e);
-            } catch (Exception e) {
-                throw new RuntimeException(e);
-            }
-        });
+        CountryResponse response = AccessController.doPrivileged((PrivilegedAction<CountryResponse>) () ->
+            cache.putIfAbsent(ipAddress, CountryResponse.class, ip -> {
+                try {
+                    return dbReader.country(ip);
+                } catch (AddressNotFoundException e) {
+                    throw new AddressNotFoundRuntimeException(e);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            }));

         Country country = response.getCountry();
         Continent continent = response.getContinent();

@@ -275,15 +282,16 @@ public final class GeoIpProcessor extends AbstractProcessor {

     private Map<String, Object> retrieveAsnGeoData(InetAddress ipAddress) {
         SpecialPermission.check();
-        AsnResponse response = AccessController.doPrivileged((PrivilegedAction<AsnResponse>) () -> {
-            try {
-                return dbReader.asn(ipAddress);
-            } catch (AddressNotFoundException e) {
-                throw new AddressNotFoundRuntimeException(e);
-            } catch (Exception e) {
-                throw new RuntimeException(e);
-            }
-        });
+        AsnResponse response = AccessController.doPrivileged((PrivilegedAction<AsnResponse>) () ->
+            cache.putIfAbsent(ipAddress, AsnResponse.class, ip -> {
+                try {
+                    return dbReader.asn(ip);
+                } catch (AddressNotFoundException e) {
+                    throw new AddressNotFoundRuntimeException(e);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            }));

         Integer asn = response.getAutonomousSystemNumber();
         String organization_name = response.getAutonomousSystemOrganization();

@@ -322,9 +330,11 @@ public final class GeoIpProcessor extends AbstractProcessor {
         );

         private final Map<String, DatabaseReaderLazyLoader> databaseReaders;
+        private final GeoIpCache cache;

-        public Factory(Map<String, DatabaseReaderLazyLoader> databaseReaders) {
+        public Factory(Map<String, DatabaseReaderLazyLoader> databaseReaders, GeoIpCache cache) {
             this.databaseReaders = databaseReaders;
+            this.cache = cache;
         }

         @Override

@@ -368,14 +378,15 @@ public final class GeoIpProcessor extends AbstractProcessor {
                 }
             }

-            return new GeoIpProcessor(processorTag, ipField, databaseReader, targetField, properties, ignoreMissing);
+            return new GeoIpProcessor(processorTag, ipField, databaseReader, targetField, properties, ignoreMissing, cache);
         }
     }

     // Geoip2's AddressNotFoundException is checked and due to the fact that we need run their code
     // inside a PrivilegedAction code block, we are forced to catch any checked exception and rethrow
     // it with an unchecked exception.
-    private static final class AddressNotFoundRuntimeException extends RuntimeException {
+    //package private for testing
+    static final class AddressNotFoundRuntimeException extends RuntimeException {

         AddressNotFoundRuntimeException(Throwable cause) {
             super(cause);
@@ -23,16 +23,20 @@ import com.maxmind.db.NoCache;
 import com.maxmind.db.NodeCache;
 import com.maxmind.db.Reader;
 import com.maxmind.geoip2.DatabaseReader;
-import org.elasticsearch.core.internal.io.IOUtils;
+import com.maxmind.geoip2.model.AbstractResponse;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.cache.Cache;
+import org.elasticsearch.common.cache.CacheBuilder;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.ingest.Processor;
 import org.elasticsearch.plugins.IngestPlugin;
 import org.elasticsearch.plugins.Plugin;

 import java.io.Closeable;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.PathMatcher;

@@ -42,6 +46,8 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
 import java.util.stream.Stream;

 public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable {

@@ -61,24 +67,18 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable
             throw new IllegalStateException("getProcessors called twice for geoip plugin!!");
         }
         Path geoIpConfigDirectory = parameters.env.configFile().resolve("ingest-geoip");
-        NodeCache cache;
         long cacheSize = CACHE_SIZE.get(parameters.env.settings());
-        if (cacheSize > 0) {
-            cache = new GeoIpCache(cacheSize);
-        } else {
-            cache = NoCache.getInstance();
-        }
         try {
-            databaseReaders = loadDatabaseReaders(geoIpConfigDirectory, cache);
+            databaseReaders = loadDatabaseReaders(geoIpConfigDirectory);
         } catch (IOException e) {
            throw new RuntimeException(e);
         }
-        return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders));
+        return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize)));
     }

-    static Map<String, DatabaseReaderLazyLoader> loadDatabaseReaders(Path geoIpConfigDirectory, NodeCache cache) throws IOException {
+    static Map<String, DatabaseReaderLazyLoader> loadDatabaseReaders(Path geoIpConfigDirectory) throws IOException {
         if (Files.exists(geoIpConfigDirectory) == false && Files.isDirectory(geoIpConfigDirectory)) {
-            throw new IllegalStateException("the geoip directory [" + geoIpConfigDirectory + "] containing databases doesn't exist");
+           throw new IllegalStateException("the geoip directory [" + geoIpConfigDirectory + "] containing databases doesn't exist");
         }
         boolean loadDatabaseOnHeap = Booleans.parseBoolean(System.getProperty("es.geoip.load_db_on_heap", "false"));
         Map<String, DatabaseReaderLazyLoader> databaseReaders = new HashMap<>();

@@ -92,7 +92,7 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable
             String databaseFileName = databasePath.getFileName().toString();
             DatabaseReaderLazyLoader holder = new DatabaseReaderLazyLoader(databaseFileName,
                 () -> {
-                    DatabaseReader.Builder builder = createDatabaseBuilder(databasePath).withCache(cache);
+                    DatabaseReader.Builder builder = createDatabaseBuilder(databasePath).withCache(NoCache.getInstance());
                     if (loadDatabaseOnHeap) {
                         builder.fileMode(Reader.FileMode.MEMORY);
                     } else {

@@ -119,4 +119,75 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable
             }
         }
     }
+
+    /**
+     * The in-memory cache for the geoip data. There should only be 1 instance of this class..
+     * This cache differs from the maxmind's {@link NodeCache} such that this cache stores the deserialized Json objects to avoid the
+     * cost of deserialization for each lookup (cached or not). This comes at slight expense of higher memory usage, but significant
+     * reduction of CPU usage.
+     */
+    static class GeoIpCache {
+        private final Cache<CacheKey, AbstractResponse> cache;
+
+        //package private for testing
+        GeoIpCache(long maxSize) {
+            if (maxSize < 0) {
+                throw new IllegalArgumentException("geoip max cache size must be 0 or greater");
+            }
+            this.cache = CacheBuilder.<CacheKey, AbstractResponse>builder().setMaximumWeight(maxSize).build();
+        }
+
+        <T extends AbstractResponse> T putIfAbsent(InetAddress ip, Class<T> responseType,
+                                                   Function<InetAddress, AbstractResponse> retrieveFunction) {
+
+            //can't use cache.computeIfAbsent due to the elevated permissions for the jackson (run via the cache loader)
+            CacheKey<T> cacheKey = new CacheKey<>(ip, responseType);
+            //intentionally non-locking for simplicity...it's OK if we re-put the same key/value in the cache during a race condition.
+            AbstractResponse response = cache.get(cacheKey);
+            if (response == null) {
+                response = retrieveFunction.apply(ip);
+                cache.put(cacheKey, response);
+            }
+            return responseType.cast(response);
+        }
+
+        //only useful for testing
+        <T extends AbstractResponse> T get(InetAddress ip, Class<T> responseType) {
+            CacheKey<T> cacheKey = new CacheKey<>(ip, responseType);
+            return responseType.cast(cache.get(cacheKey));
+        }
+
+        /**
+         * The key to use for the cache. Since this cache can span multiple geoip processors that all use different databases, the response
+         * type is needed to be included in the cache key. For example, if we only used the IP address as the key the City and ASN the same
+         * IP may be in both with different values and we need to cache both. The response type scopes the IP to the correct database
+         * provides a means to safely cast the return objects.
+         * @param <T> The AbstractResponse type used to scope the key and cast the result.
+         */
+        private static class CacheKey<T extends AbstractResponse> {
+
+            private final InetAddress ip;
+            private final Class<T> responseType;
+
+            private CacheKey(InetAddress ip, Class<T> responseType) {
+                this.ip = ip;
+                this.responseType = responseType;
+            }
+
+            //generated
+            @Override
+            public boolean equals(Object o) {
+                if (this == o) return true;
+                if (o == null || getClass() != o.getClass()) return false;
+                CacheKey<?> cacheKey = (CacheKey<?>) o;
+                return Objects.equals(ip, cacheKey.ip) &&
+                    Objects.equals(responseType, cacheKey.responseType);
+            }
+
+            //generated
+            @Override
+            public int hashCode() {
+                return Objects.hash(ip, responseType);
+            }
+        }
+    }
 }
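
Because the new cache keys on the pair (IP address, response type), the same address can be cached once per database without collisions. A hedged usage sketch (illustrative only; lookupCity is a hypothetical stand-in for the privileged dbReader.city(ip) call in GeoIpProcessor):

[source,java]
--------------------------------
import java.net.InetAddress;

import com.maxmind.geoip2.model.CityResponse;
import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;

final class GeoIpCacheUsageSketch {

    static CityResponse cachedCity(GeoIpCache cache, InetAddress ip) {
        // First call for this (ip, CityResponse) pair invokes the loader;
        // subsequent calls are served from the cache.
        return cache.putIfAbsent(ip, CityResponse.class, addr -> lookupCity(addr));
    }

    // Hypothetical stand-in for dbReader.city(addr); not part of the commit.
    static CityResponse lookupCity(InetAddress addr) {
        throw new UnsupportedOperationException("illustrative placeholder");
    }
}
--------------------------------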
@@ -1,51 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.ingest.geoip;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.node.IntNode;
-import com.maxmind.db.NodeCache;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.test.ESTestCase;
-
-public class GeoIpCacheTests extends ESTestCase {
-    public void testCachesAndEvictsResults() throws Exception {
-        GeoIpCache cache = new GeoIpCache(1);
-        final NodeCache.Loader loader = key -> new IntNode(key);
-
-        JsonNode jsonNode1 = cache.get(1, loader);
-        assertSame(jsonNode1, cache.get(1, loader));
-
-        // evict old key by adding another value
-        cache.get(2, loader);
-
-        assertNotSame(jsonNode1, cache.get(1, loader));
-    }
-
-    public void testThrowsElasticsearchException() throws Exception {
-        GeoIpCache cache = new GeoIpCache(1);
-        NodeCache.Loader loader = (int key) -> {
-            throw new IllegalArgumentException("Illegal key");
-        };
-        ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> cache.get(1, loader));
-        assertTrue("Expected cause to be of type IllegalArgumentException but was [" + ex.getCause().getClass() + "]",
-            ex.getCause() instanceof IllegalArgumentException);
-        assertEquals("Illegal key", ex.getCause().getMessage());
-    }
-}
@@ -20,11 +20,10 @@
 package org.elasticsearch.ingest.geoip;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import com.maxmind.db.NoCache;
-import com.maxmind.db.NodeCache;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.StreamsUtils;
 import org.junit.AfterClass;

@@ -69,8 +68,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         Files.copy(new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")),
             geoIpConfigDir.resolve("GeoLite2-ASN.mmdb"));

-        NodeCache cache = randomFrom(NoCache.getInstance(), new GeoIpCache(randomNonNegativeLong()));
-        databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir, cache);
+        databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir);
     }

     @AfterClass

@@ -92,7 +90,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");

@@ -111,7 +109,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");

@@ -131,7 +129,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");

@@ -152,7 +150,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");

@@ -173,7 +171,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");
         config.put("target_field", "_field");

@@ -187,7 +185,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");
         config.put("database_file", "GeoLite2-Country.mmdb");

@@ -203,7 +201,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");
         config.put("database_file", "GeoLite2-Country.mmdb");

@@ -220,7 +218,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");
         config.put("database_file", "GeoLite2-ASN.mmdb");

@@ -237,7 +235,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Map<String, Object> config = new HashMap<>();
         config.put("field", "_field");

@@ -250,7 +248,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Set<GeoIpProcessor.Property> properties = EnumSet.noneOf(GeoIpProcessor.Property.class);
         List<String> fieldNames = new ArrayList<>();

@@ -277,7 +275,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // This test uses a MappedByteBuffer which will keep the file mappings active until it is garbage-collected.
         // As a consequence, the corresponding file appears to be still in use and Windows cannot delete it.
         assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS);
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));

         Map<String, Object> config1 = new HashMap<>();
         config1.put("field", "_field");

@@ -311,8 +309,8 @@ public class GeoIpProcessorFactoryTests extends ESTestCase {
         // database readers used at class level are reused between tests. (we want to keep that otherwise running this
         // test will take roughly 4 times more time)
         Map<String, DatabaseReaderLazyLoader> databaseReaders =
-            IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir, NoCache.getInstance());
-        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders);
+            IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir);
+        GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000));
         for (DatabaseReaderLazyLoader lazyLoader : databaseReaders.values()) {
             assertNull(lazyLoader.databaseReader.get());
         }
@@ -20,8 +20,9 @@
 package org.elasticsearch.ingest.geoip;

 import com.maxmind.geoip2.DatabaseReader;
-import org.elasticsearch.ingest.RandomDocumentPicks;
 import org.elasticsearch.ingest.IngestDocument;
+import org.elasticsearch.ingest.RandomDocumentPicks;
+import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;
 import org.elasticsearch.test.ESTestCase;

 import java.io.InputStream;

@@ -40,7 +41,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testCity() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", "8.8.8.8");

@@ -64,7 +66,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testNullValueWithIgnoreMissing() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true,
+            new GeoIpCache(1000));
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(),
             Collections.singletonMap("source_field", null));
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);

@@ -75,7 +78,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testNonExistentWithIgnoreMissing() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), true,
+            new GeoIpCache(1000));
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap());
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         processor.execute(ingestDocument);

@@ -85,7 +89,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testNullWithoutIgnoreMissing() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(),
             Collections.singletonMap("source_field", null));
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);

@@ -96,7 +101,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testNonExistentWithoutIgnoreMissing() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap());
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));

@@ -106,7 +112,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testCity_withIpV6() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         String address = "2602:306:33d3:8000::3257:9652";
         Map<String, Object> document = new HashMap<>();

@@ -135,7 +142,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testCityWithMissingLocation() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", "80.231.5.0");

@@ -152,7 +160,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testCountry() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-Country.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", "82.170.213.79");

@@ -172,7 +181,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testCountryWithMissingLocation() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-Country.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", "80.231.5.0");

@@ -190,7 +200,8 @@ public class GeoIpProcessorTests extends ESTestCase {
         String ip = "82.170.213.79";
         InputStream database = getDatabaseFileInputStream("/GeoLite2-ASN.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", ip);

@@ -209,7 +220,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testAddressIsNotInTheDatabase() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", "127.0.0.1");

@@ -222,7 +234,8 @@ public class GeoIpProcessorTests extends ESTestCase {
     public void testInvalid() throws Exception {
         InputStream database = getDatabaseFileInputStream("/GeoLite2-City.mmdb");
         GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
-            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false);
+            new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
+            new GeoIpCache(1000));

         Map<String, Object> document = new HashMap<>();
         document.put("source_field", "www.google.com");
@@ -0,0 +1,64 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.ingest.geoip;

import com.maxmind.geoip2.model.AbstractResponse;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache;
import org.elasticsearch.test.ESTestCase;

import static org.mockito.Mockito.mock;

public class IngestGeoIpPluginTests extends ESTestCase {

    public void testCachesAndEvictsResults() {
        GeoIpCache cache = new GeoIpCache(1);
        AbstractResponse response1 = mock(AbstractResponse.class);
        AbstractResponse response2 = mock(AbstractResponse.class);

        //add a key
        AbstractResponse cachedResponse = cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class, ip -> response1);
        assertSame(cachedResponse, response1);
        assertSame(cachedResponse, cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class, ip -> response1));
        assertSame(cachedResponse, cache.get(InetAddresses.forString("127.0.0.1"), AbstractResponse.class));

        // evict old key by adding another value
        cachedResponse = cache.putIfAbsent(InetAddresses.forString("127.0.0.2"), AbstractResponse.class, ip -> response2);
        assertSame(cachedResponse, response2);
        assertSame(cachedResponse, cache.putIfAbsent(InetAddresses.forString("127.0.0.2"), AbstractResponse.class, ip -> response2));
        assertSame(cachedResponse, cache.get(InetAddresses.forString("127.0.0.2"), AbstractResponse.class));

        assertNotSame(response1, cache.get(InetAddresses.forString("127.0.0.1"), AbstractResponse.class));
    }

    public void testThrowsFunctionsException() {
        GeoIpCache cache = new GeoIpCache(1);
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
            () -> cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class,
                ip -> { throw new IllegalArgumentException("bad"); }));
        assertEquals("bad", ex.getMessage());
    }

    public void testInvalidInit() {
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1));
        assertEquals("geoip max cache size must be 0 or greater", ex.getMessage());
    }
}
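The tests above pin down the cache contract: `putIfAbsent` computes and stores one response per address and response type, the size bound evicts older entries, loader exceptions propagate without caching anything, and a negative size is rejected. A minimal JDK-only sketch of a cache with that shape — the real `GeoIpCache` is nested in `IngestGeoIpPlugin` and its eviction policy may differ; `SimpleGeoIpCache` and `CacheKey` are invented names:

[source,java]
--------------------------------
import java.net.InetAddress;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;

// Sketch only: an access-ordered LinkedHashMap stands in for the production cache.
final class SimpleGeoIpCache {

    // Composite key: city, country and ASN lookups for the same address
    // return different response objects, so the type is part of the key.
    private static final class CacheKey {
        final InetAddress ip;
        final Class<?> responseType;

        CacheKey(InetAddress ip, Class<?> responseType) {
            this.ip = ip;
            this.responseType = responseType;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o instanceof CacheKey == false) return false;
            CacheKey other = (CacheKey) o;
            return ip.equals(other.ip) && responseType.equals(other.responseType);
        }

        @Override
        public int hashCode() {
            return Objects.hash(ip, responseType);
        }
    }

    private final Map<CacheKey, Object> cache;

    SimpleGeoIpCache(int maxSize) {
        if (maxSize < 0) {
            throw new IllegalArgumentException("geoip max cache size must be 0 or greater");
        }
        // access-order LinkedHashMap drops the eldest entry once maxSize is exceeded
        this.cache = new LinkedHashMap<CacheKey, Object>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<CacheKey, Object> eldest) {
                return size() > maxSize;
            }
        };
    }

    <T> T putIfAbsent(InetAddress ip, Class<T> responseType, Function<InetAddress, T> retrieve) {
        CacheKey key = new CacheKey(ip, responseType);
        Object cached = cache.get(key);
        if (cached == null) {
            cached = retrieve.apply(ip); // may throw; nothing is cached in that case
            cache.put(key, cached);
        }
        return responseType.cast(cached);
    }

    <T> T get(InetAddress ip, Class<T> responseType) {
        return responseType.cast(cache.get(new CacheKey(ip, responseType)));
    }
}
--------------------------------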
@@ -0,0 +1,79 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport.nio;

import org.apache.logging.log4j.Level;
import org.elasticsearch.NioIntegTestCase;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.transport.TransportLogger;

@ESIntegTestCase.ClusterScope(numDataNodes = 2)
@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace")
public class NioTransportLoggingIT extends NioIntegTestCase {

    private MockLogAppender appender;

    public void setUp() throws Exception {
        super.setUp();
        appender = new MockLogAppender();
        Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender);
        appender.start();
    }

    public void tearDown() throws Exception {
        Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender);
        appender.stop();
        super.tearDown();
    }

    public void testLoggingHandler() throws IllegalAccessException {
        final String writePattern =
            ".*\\[length: \\d+" +
                ", request id: \\d+" +
                ", type: request" +
                ", version: .*" +
                ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" +
                " WRITE: \\d+B";
        final MockLogAppender.LoggingExpectation writeExpectation =
            new MockLogAppender.PatternSeenEventExcpectation(
                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern);

        final String readPattern =
            ".*\\[length: \\d+" +
                ", request id: \\d+" +
                ", type: request" +
                ", version: .*" +
                ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" +
                " READ: \\d+B";

        final MockLogAppender.LoggingExpectation readExpectation =
            new MockLogAppender.PatternSeenEventExcpectation(
                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern);

        appender.addExpectation(writeExpectation);
        appender.addExpectation(readExpectation);
        client().admin().cluster().nodesHotThreads(new NodesHotThreadsRequest()).actionGet();
        appender.assertAllExpectationsMatched();
    }
}
@@ -235,8 +235,8 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase {
                () -> client().performRequest(request));
            assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
            assertThat(responseException.getMessage(),
                containsString("Missing required setting [search.remote.remote1.seeds] " +
                    "for setting [search.remote.remote1.skip_unavailable]"));
                containsString("Missing required setting [cluster.remote.remote1.seeds] " +
                    "for setting [cluster.remote.remote1.skip_unavailable]"));
        }

        Map<String, Object> settingsMap = new HashMap<>();

@@ -251,8 +251,8 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase {
            ResponseException responseException = expectThrows(ResponseException.class,
                () -> client().performRequest(request));
            assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
            assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " +
                "for setting [search.remote.remote1.skip_unavailable]"));
            assertThat(responseException.getMessage(), containsString("Missing required setting [cluster.remote.remote1.seeds] " +
                "for setting [cluster.remote.remote1.skip_unavailable]"));
        }

        if (randomBoolean()) {

@@ -304,7 +304,7 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase {
        {
            builder.startObject("persistent");
            {
                builder.startObject("search.remote.remote1");
                builder.startObject("cluster.remote.remote1");
                {
                    for (Map.Entry<String, Object> entry : settings.entrySet()) {
                        builder.field(entry.getKey(), entry.getValue());
@@ -28,7 +28,7 @@ task remoteClusterTest(type: RestIntegTestTask) {
remoteClusterTestCluster {
    numNodes = 2
    clusterName = 'remote-cluster'
    setting 'search.remote.connect', false
    setting 'cluster.remote.connect', false
}

remoteClusterTestRunner {

@@ -39,9 +39,9 @@ task mixedClusterTest(type: RestIntegTestTask) {}

mixedClusterTestCluster {
    dependsOn remoteClusterTestRunner
    setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\""
    setting 'search.remote.connections_per_cluster', 1
    setting 'search.remote.connect', true
    setting 'cluster.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\""
    setting 'cluster.remote.connections_per_cluster', 1
    setting 'cluster.remote.connect', true
}

mixedClusterTestRunner {
@@ -99,16 +99,16 @@
      cluster.get_settings:
        include_defaults: true

  - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip }
  - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip }

  - do:
      cluster.put_settings:
        flat_settings: true
        body:
          transient:
            search.remote.test_remote_cluster.seeds: $remote_ip
            cluster.remote.test_remote_cluster.seeds: $remote_ip

  - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}}
  - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}}

  - do:
      search:

@@ -124,16 +124,16 @@
      cluster.get_settings:
        include_defaults: true

  - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip }
  - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip }

  - do:
      cluster.put_settings:
        flat_settings: true
        body:
          transient:
            search.remote.test_remote_cluster.seeds: $remote_ip
            cluster.remote.test_remote_cluster.seeds: $remote_ip

  - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}}
  - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}}

  - do:
      search:
@@ -14,16 +14,16 @@
      cluster.get_settings:
        include_defaults: true

  - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip }
  - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip }

  - do:
      cluster.put_settings:
        flat_settings: true
        body:
          transient:
            search.remote.test_remote_cluster.seeds: $remote_ip
            cluster.remote.test_remote_cluster.seeds: $remote_ip

  - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}}
  - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}}

  # we do another search here since this will enforce the connection to be established
  # otherwise the cluster might not have been connected yet.

@@ -56,7 +56,7 @@
      cluster.put_settings:
        body:
          transient:
            search.remote.test_remote_cluster.seeds: null
            cluster.remote.test_remote_cluster.seeds: null

---
"skip_unavailable is returned as part of _remote/info response":

@@ -68,16 +68,16 @@
      cluster.get_settings:
        include_defaults: true

  - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip }
  - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip }

  - do:
      cluster.put_settings:
        flat_settings: true
        body:
          transient:
            search.remote.remote1.seeds: $remote_ip
            cluster.remote.remote1.seeds: $remote_ip

  - match: {transient: {search.remote.remote1.seeds: $remote_ip}}
  - match: {transient: {cluster.remote.remote1.seeds: $remote_ip}}

  - do:
      cluster.remote_info: {}

@@ -87,9 +87,9 @@
      cluster.put_settings:
        body:
          transient:
            search.remote.remote1.skip_unavailable: true
            cluster.remote.remote1.skip_unavailable: true

  - is_true: transient.search.remote.remote1.skip_unavailable
  - is_true: transient.cluster.remote.remote1.skip_unavailable

  - do:
      cluster.remote_info: {}

@@ -100,9 +100,9 @@
      cluster.put_settings:
        body:
          transient:
            search.remote.remote1.skip_unavailable: false
            cluster.remote.remote1.skip_unavailable: false

  - is_false: transient.search.remote.remote1.skip_unavailable
  - is_false: transient.cluster.remote.remote1.skip_unavailable

  - do:
      cluster.remote_info: {}

@@ -113,7 +113,7 @@
      cluster.put_settings:
        body:
          transient:
            search.remote.remote1.skip_unavailable: null
            cluster.remote.remote1.skip_unavailable: null

  - match: {transient: {}}

@@ -126,5 +126,5 @@
      cluster.put_settings:
        body:
          transient:
            search.remote.remote1.seeds: null
            search.remote.remote1.skip_unavailable: null
            cluster.remote.remote1.seeds: null
            cluster.remote.remote1.skip_unavailable: null
@@ -138,6 +138,15 @@ public class Regex {
        return false;
    }

    /**
     * Similar to {@link #simpleMatch(String[], String)}, but accepts a list of strings instead of an array of strings for the patterns to
     * match.
     */
    public static boolean simpleMatch(final List<String> patterns, final String str) {
        // #simpleMatch(String[], String) is likely to be inlined into this method
        return patterns != null && simpleMatch(patterns.toArray(Strings.EMPTY_ARRAY), str);
    }

    public static boolean simpleMatch(String[] patterns, String[] types) {
        if (patterns != null && types != null) {
            for (String type : types) {
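The new overload simply adapts a `List` to the existing array variant. For readers unfamiliar with the matcher, here is a standalone approximation of its glob semantics, where `*` is the only wildcard and everything else is literal. `SimpleMatchDemo` is a hypothetical name, and the real implementation does not go through `java.util.regex`:

[source,java]
--------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

// Sketch of Regex.simpleMatch semantics via regex translation.
final class SimpleMatchDemo {

    static boolean simpleMatch(List<String> patterns, String str) {
        // mirrors the new List overload: a null pattern list matches nothing
        return patterns != null && patterns.stream().anyMatch(p -> simpleMatch(p, str));
    }

    static boolean simpleMatch(String pattern, String str) {
        // translate the glob into a regex, quoting the literal segments
        String[] parts = pattern.split("\\*", -1);
        StringBuilder regex = new StringBuilder();
        for (int i = 0; i < parts.length; i++) {
            if (i > 0) {
                regex.append(".*");
            }
            regex.append(Pattern.quote(parts[i]));
        }
        return Pattern.matches(regex.toString(), str);
    }

    public static void main(String[] args) {
        List<String> patterns = Arrays.asList("cluster.remote.*", "search.remote.*");
        System.out.println(simpleMatch(patterns, "cluster.remote.remote1.seeds")); // true
        System.out.println(simpleMatch(patterns, "node.attr.server_name"));        // false
        System.out.println(simpleMatch((List<String>) null, "anything"));          // false
    }
}
--------------------------------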
@@ -273,12 +273,19 @@ public final class ClusterSettings extends AbstractScopedSettings {
    ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
    TransportSearchAction.SHARD_COUNT_LIMIT_SETTING,
    RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
    RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS,
    RemoteClusterAware.REMOTE_CLUSTERS_PROXY,
    RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY,
    RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE,
    RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE,
    RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER,
    RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER,
    RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING,
    RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING,
    RemoteClusterService.REMOTE_NODE_ATTRIBUTE,
    RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE,
    RemoteClusterService.ENABLE_REMOTE_CLUSTERS,
    RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS,
    TransportService.TRACE_LOG_EXCLUDE_SETTING,
    TransportService.TRACE_LOG_INCLUDE_SETTING,
    TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
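Each renamed setting is registered twice here: the new `cluster.remote.*` constant alongside its deprecated `search.remote.*` twin, so configurations using either key still validate during the deprecation window. A toy model of why both registrations are needed — all names below are invented:

[source,java]
--------------------------------
import java.util.HashSet;
import java.util.Set;

// Sketch: a registry that keeps renamed keys valid but flags them.
final class ToyClusterSettings {
    private final Set<String> known = new HashSet<>();
    private final Set<String> deprecated = new HashSet<>();

    void register(String key) {
        known.add(key);
    }

    void registerDeprecated(String key) {
        known.add(key);
        deprecated.add(key);
    }

    void validate(String key) {
        if (known.contains(key) == false) {
            throw new IllegalArgumentException("unknown setting [" + key + "]");
        }
        if (deprecated.contains(key)) {
            System.err.println("[" + key + "] is deprecated; use the cluster.remote.* variant");
        }
    }

    public static void main(String[] args) {
        ToyClusterSettings settings = new ToyClusterSettings();
        settings.register("cluster.remote.connect");
        settings.registerDeprecated("search.remote.connect");
        settings.validate("search.remote.connect");  // accepted, with a deprecation warning
        settings.validate("cluster.remote.connect"); // accepted silently
    }
}
--------------------------------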
@@ -16,6 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import org.apache.logging.log4j.Logger;

@@ -753,7 +754,7 @@ public class Setting<T> implements ToXContentObject {

    /**
     * Returns the namespace for a concrete setting. Ie. an affix setting with prefix: {@code search.} and suffix: {@code username}
     * will return {@code remote} as a namespace for the setting {@code search.remote.username}
     * will return {@code remote} as a namespace for the setting {@code cluster.remote.username}
     */
    public String getNamespace(Setting<T> concreteSetting) {
        return key.getNamespace(concreteSetting.getKey());

@@ -1043,7 +1044,15 @@ public class Setting<T> implements ToXContentObject {
    }

    public static Setting<String> simpleString(String key, Setting<String> fallback, Property... properties) {
        return new Setting<>(key, fallback, Function.identity(), properties);
        return simpleString(key, fallback, Function.identity(), properties);
    }

    public static Setting<String> simpleString(
            final String key,
            final Setting<String> fallback,
            final Function<String, String> parser,
            final Property... properties) {
        return new Setting<>(key, fallback, parser, properties);
    }

    public static Setting<String> simpleString(String key, Validator<String> validator, Property... properties) {

@@ -1275,15 +1284,41 @@ public class Setting<T> implements ToXContentObject {
        return new GroupSetting(key, validator, properties);
    }

    public static Setting<TimeValue> timeSetting(String key, Function<Settings, TimeValue> defaultValue, TimeValue minValue,
                                                 Property... properties) {
        return new Setting<>(key, (s) -> defaultValue.apply(s).getStringRep(), (s) -> {
            TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
            if (timeValue.millis() < minValue.millis()) {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
    public static Setting<TimeValue> timeSetting(
            final String key,
            final Setting<TimeValue> fallbackSetting,
            final TimeValue minValue,
            final Property... properties) {
        final SimpleKey simpleKey = new SimpleKey(key);
        return new Setting<>(
                simpleKey,
                fallbackSetting,
                fallbackSetting::getRaw,
                minTimeValueParser(key, minValue),
                (v, s) -> {},
                properties);
    }

    public static Setting<TimeValue> timeSetting(
            final String key, Function<Settings, TimeValue> defaultValue, final TimeValue minValue, final Property... properties) {
        final SimpleKey simpleKey = new SimpleKey(key);
        return new Setting<>(simpleKey, s -> defaultValue.apply(s).getStringRep(), minTimeValueParser(key, minValue), properties);
    }

    private static Function<String, TimeValue> minTimeValueParser(final String key, final TimeValue minValue) {
        return s -> {
            final TimeValue value = TimeValue.parseTimeValue(s, null, key);
            if (value.millis() < minValue.millis()) {
                final String message = String.format(
                        Locale.ROOT,
                        "failed to parse value [%s] for setting [%s], must be >= [%s]",
                        s,
                        key,
                        minValue.getStringRep());
                throw new IllegalArgumentException(message);
            }
            return timeValue;
        }, properties);
            return value;
        };
    }

    public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) {

@@ -1302,6 +1337,14 @@ public class Setting<T> implements ToXContentObject {
        return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
    }

    public static Setting<TimeValue> positiveTimeSetting(
            final String key,
            final Setting<TimeValue> fallbackSetting,
            final TimeValue minValue,
            final Property... properties) {
        return timeSetting(key, fallbackSetting, minValue, properties);
    }

    public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, Property... properties) {
        return doubleSetting(key, defaultValue, minValue, Double.POSITIVE_INFINITY, properties);
    }
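The key move in this hunk is extracting `minTimeValueParser` so both `timeSetting` overloads share one minimum-enforcing parser. A simplified standalone sketch of that parser's behavior — plain milliseconds stand in for `TimeValue`, and all names are invented:

[source,java]
--------------------------------
import java.util.Locale;
import java.util.function.Function;

// Sketch of a minimum-enforcing time parser, modeled on the hunk above.
final class MinTimeParserDemo {

    static Function<String, Long> minTimeValueParser(String key, long minMillis) {
        return s -> {
            long value = parseMillis(s);
            if (value < minMillis) {
                String message = String.format(
                    Locale.ROOT,
                    "failed to parse value [%s] for setting [%s], must be >= [%dms]",
                    s, key, minMillis);
                throw new IllegalArgumentException(message);
            }
            return value;
        };
    }

    // toy parser: accepts "<n>ms" and "<n>s" only; the real code uses TimeValue.parseTimeValue
    static long parseMillis(String s) {
        if (s.endsWith("ms")) {
            return Long.parseLong(s.substring(0, s.length() - 2));
        }
        if (s.endsWith("s")) {
            return Long.parseLong(s.substring(0, s.length() - 1)) * 1000;
        }
        throw new IllegalArgumentException("failed to parse [" + s + "]");
    }

    public static void main(String[] args) {
        Function<String, Long> parser = minTimeValueParser("cluster.remote.initial_connect_timeout", 0);
        System.out.println(parser.apply("30s")); // 30000
        parser.apply("-1s");                     // throws: must be >= [0ms]
    }
}
--------------------------------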
@@ -2014,7 +2014,9 @@ public class InternalEngine extends Engine {
        /* Acquire order here is store -> manager since we need
         * to make sure that the store is not closed before
         * the searcher is acquired. */
        store.incRef();
        if (store.tryIncRef() == false) {
            throw new AlreadyClosedException(shardId + " store is closed", failedEngine.get());
        }
        Releasable releasable = store::decRef;
        try {
            final ReferenceManager<IndexSearcher> referenceManager;
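This change swaps an unconditional `incRef` for `tryIncRef`, turning a race with `close` into a clean `AlreadyClosedException` rather than taking a reference on a store that is already shutting down. A compact sketch of the pattern — the toy class below uses `IllegalStateException` in place of Lucene's `AlreadyClosedException`, and all names are invented:

[source,java]
--------------------------------
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the tryIncRef guard: take a reference only if the object is
// still open, and always release it in a finally block.
final class RefCountedStore {
    private final AtomicInteger refCount = new AtomicInteger(1); // 1 = open

    boolean tryIncRef() {
        while (true) {
            int current = refCount.get();
            if (current <= 0) {
                return false; // already closed, never resurrect
            }
            if (refCount.compareAndSet(current, current + 1)) {
                return true;
            }
        }
    }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            System.out.println("store closed, releasing resources");
        }
    }

    void acquireSearcher() {
        if (tryIncRef() == false) {
            throw new IllegalStateException("store is closed");
        }
        try {
            System.out.println("searcher acquired while store is referenced");
        } finally {
            decRef(); // mirrors the Releasable in the hunk above
        }
    }

    public static void main(String[] args) {
        RefCountedStore store = new RefCountedStore();
        store.acquireSearcher();
        store.decRef();          // drop the initial reference: store closes
        store.acquireSearcher(); // now throws IllegalStateException
    }
}
--------------------------------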
@@ -645,10 +645,14 @@ public final class IngestDocument {
     * @throws Exception On exception in pipeline execution
     */
    public IngestDocument executePipeline(Pipeline pipeline) throws Exception {
        if (this.executedPipelines.add(pipeline) == false) {
            throw new IllegalStateException("Recursive invocation of pipeline [" + pipeline.getId() + "] detected.");
        try {
            if (this.executedPipelines.add(pipeline) == false) {
                throw new IllegalStateException("Recursive invocation of pipeline [" + pipeline.getId() + "] detected.");
            }
            return pipeline.execute(this);
        } finally {
            executedPipelines.remove(pipeline);
        }
        return pipeline.execute(this);
    }

    @Override
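The fix wraps the recursion check in try/finally so the pipeline is removed from the executed set even when execution throws, which lets the same pipeline run again later as long as it does not call itself. Distilled to its essentials — `PipelineRunner` is hypothetical, with pipeline ids standing in for `Pipeline` objects:

[source,java]
--------------------------------
import java.util.HashSet;
import java.util.Set;

// Sketch of the fixed recursion guard: the id is tracked only for the
// duration of the call, so a failed run does not poison later reuse.
final class PipelineRunner {
    private final Set<String> executedPipelines = new HashSet<>();

    String executePipeline(String pipelineId) {
        try {
            if (executedPipelines.add(pipelineId) == false) {
                throw new IllegalStateException("Recursive invocation of pipeline [" + pipelineId + "] detected.");
            }
            // a pipeline that invoked itself would re-enter here and trip the guard
            return "executed " + pipelineId;
        } finally {
            executedPipelines.remove(pipelineId);
        }
    }

    public static void main(String[] args) {
        PipelineRunner runner = new PipelineRunner();
        System.out.println(runner.executePipeline("p1"));
        // the same pipeline can run again, because the guard was cleared
        System.out.println(runner.executePipeline("p1"));
    }
}
--------------------------------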
@@ -150,6 +150,7 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.usage.UsageService;
import org.elasticsearch.watcher.ResourceWatcherService;

import javax.net.ssl.SNIHostName;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.IOException;

@@ -209,6 +210,13 @@ public class Node implements Closeable {
            throw new IllegalArgumentException(key + " cannot have leading or trailing whitespace " +
                "[" + value + "]");
        }
        if (value.length() > 0 && "node.attr.server_name".equals(key)) {
            try {
                new SNIHostName(value);
            } catch (IllegalArgumentException e) {
                throw new IllegalArgumentException("invalid node.attr.server_name [" + value + "]", e);
            }
        }
        return value;
    }, Property.NodeScope));
    public static final Setting<String> BREAKER_TYPE_KEY = new Setting<>("indices.breaker.type", "hierarchy", (s) -> {
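The new check validates `node.attr.server_name` eagerly at node construction, so a value that cannot be sent as a TLS SNI hostname fails fast instead of breaking remote connections later. A standalone demo of the same validation — the wrapper class is hypothetical, but `SNIHostName` is the JDK type used above:

[source,java]
--------------------------------
import javax.net.ssl.SNIHostName;

// Demo: SNIHostName's constructor rejects values that are not legal
// hostnames for a TLS server_name extension.
final class ServerNameValidation {

    static String validateServerName(String value) {
        if (value.length() > 0) {
            try {
                new SNIHostName(value);
            } catch (IllegalArgumentException e) {
                throw new IllegalArgumentException("invalid node.attr.server_name [" + value + "]", e);
            }
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(validateServerName("node1.example.com")); // accepted
        validateServerName("not a hostname");                        // throws
    }
}
--------------------------------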
@@ -16,10 +16,9 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport;

import java.util.EnumSet;
import java.util.function.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -37,11 +36,13 @@ import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@@ -50,40 +51,83 @@ import java.util.stream.Stream;
 */
public abstract class RemoteClusterAware extends AbstractComponent {

    public static final Setting.AffixSetting<List<String>> SEARCH_REMOTE_CLUSTERS_SEEDS =
        Setting.affixKeySetting(
            "search.remote.",
            "seeds",
            key -> Setting.listSetting(
                key,
                Collections.emptyList(),
                s -> {
                    parsePort(s);
                    return s;
                },
                Setting.Property.Deprecated,
                Setting.Property.Dynamic,
                Setting.Property.NodeScope));

    /**
     * A list of initial seed nodes to discover eligible nodes from the remote cluster
     */
    public static final Setting.AffixSetting<List<String>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting(
        "search.remote.",
        "seeds",
        key -> Setting.listSetting(
            key, Collections.emptyList(),
            s -> {
                // validate seed address
                parsePort(s);
                return s;
            },
            Setting.Property.NodeScope,
            Setting.Property.Dynamic
        )
    );
        "cluster.remote.",
        "seeds",
        key -> Setting.listSetting(
            key,
            // the default needs to be emptyList() when fallback is removed
            "_na_".equals(key)
                ? SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(key)
                : SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSetting(key.replaceAll("^cluster", "search")),
            s -> {
                // validate seed address
                parsePort(s);
                return s;
            },
            Setting.Property.Dynamic,
            Setting.Property.NodeScope));

    public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
    public static final String LOCAL_CLUSTER_GROUP_KEY = "";

    public static final Setting.AffixSetting<String> SEARCH_REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting(
        "search.remote.",
        "proxy",
        key -> Setting.simpleString(
            key,
            s -> {
                if (Strings.hasLength(s)) {
                    parsePort(s);
                }
                return s;
            },
            Setting.Property.Deprecated,
            Setting.Property.Dynamic,
            Setting.Property.NodeScope),
        REMOTE_CLUSTERS_SEEDS);

    /**
     * A proxy address for the remote cluster.
     * NOTE: this setting is undocumented until we have at least one transport that supports passing
     * on the hostname via a mechanism like SNI.
     */
    public static final Setting.AffixSetting<String> REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting(
        "search.remote.",
        "proxy",
        key -> Setting.simpleString(key, s -> {
            if (Strings.hasLength(s)) {
                parsePort(s);
            }
            return s;
        }, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS);
        "cluster.remote.",
        "proxy",
        key -> Setting.simpleString(
            key,
            // no default is needed when fallback is removed, use simple string which gives empty
            "_na_".equals(key)
                ? SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(key)
                : SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSetting(key.replaceAll("^cluster", "search")),
            s -> {
                if (Strings.hasLength(s)) {
                    parsePort(s);
                }
                return s;
            },
            Setting.Property.Dynamic,
            Setting.Property.NodeScope),
        REMOTE_CLUSTERS_SEEDS);

    protected final ClusterNameExpressionResolver clusterNameResolver;

@@ -105,16 +149,16 @@ public abstract class RemoteClusterAware extends AbstractComponent {
    protected static Map<String, Tuple<String, List<Supplier<DiscoveryNode>>>> buildRemoteClustersDynamicConfig(Settings settings) {
        Stream<Setting<List<String>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
        return allConcreteSettings.collect(
            Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
                String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
                List<String> addresses = concreteSetting.get(settings);
                final boolean proxyMode = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).exists(settings);
                List<Supplier<DiscoveryNode>> nodes = new ArrayList<>(addresses.size());
                for (String address : addresses) {
                    nodes.add(() -> buildSeedNode(clusterName, address, proxyMode));
                }
                return new Tuple<>(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).get(settings), nodes);
            }));
            Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
                String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
                List<String> addresses = concreteSetting.get(settings);
                final boolean proxyMode = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).exists(settings);
                List<Supplier<DiscoveryNode>> nodes = new ArrayList<>(addresses.size());
                for (String address : addresses) {
                    nodes.add(() -> buildSeedNode(clusterName, address, proxyMode));
                }
                return new Tuple<>(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).get(settings), nodes);
            }));
    }

    static DiscoveryNode buildSeedNode(String clusterName, String address, boolean proxyMode) {

@@ -122,14 +166,14 @@ public abstract class RemoteClusterAware extends AbstractComponent {
            TransportAddress transportAddress = new TransportAddress(TransportAddress.META_ADDRESS, 0);
            String hostName = address.substring(0, indexOfPortSeparator(address));
            return new DiscoveryNode("", clusterName + "#" + address, UUIDs.randomBase64UUID(), hostName, address,
                transportAddress, Collections
                .emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class),
                Version.CURRENT.minimumCompatibilityVersion());
                transportAddress, Collections
                .emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class),
                Version.CURRENT.minimumCompatibilityVersion());
        } else {
            TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
            return new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
                transportAddress,
                Version.CURRENT.minimumCompatibilityVersion());
                transportAddress,
                Version.CURRENT.minimumCompatibilityVersion());
        }
    }

@@ -157,8 +201,8 @@ public abstract class RemoteClusterAware extends AbstractComponent {
                    // remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias
                    // if that happens
                    throw new IllegalArgumentException("Can not filter indices; index " + index +
                        " exists but there is also a remote cluster named: " + remoteClusterName);
                }
                        " exists but there is also a remote cluster named: " + remoteClusterName);
                }
                String indexName = index.substring(i + 1);
                for (String clusterName : clusters) {
                    perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName);

@@ -186,10 +230,16 @@ public abstract class RemoteClusterAware extends AbstractComponent {
     * Registers this instance to listen to updates on the cluster settings.
     */
    public void listenForUpdates(ClusterSettings clusterSettings) {
        clusterSettings.addAffixUpdateConsumer(RemoteClusterAware.REMOTE_CLUSTERS_PROXY,
            RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
            (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()),
            (namespace, value) -> {});
        clusterSettings.addAffixUpdateConsumer(
            RemoteClusterAware.REMOTE_CLUSTERS_PROXY,
            RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
            (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()),
            (namespace, value) -> {});
        clusterSettings.addAffixUpdateConsumer(
            RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY,
            RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS,
            (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()),
            (namespace, value) -> {});
    }

@@ -227,4 +277,5 @@ public abstract class RemoteClusterAware extends AbstractComponent {
    public static String buildRemoteIndexName(String clusterAlias, String indexName) {
        return clusterAlias != null ? clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + indexName : indexName;
    }

}
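The recurring trick in this class is that each concrete `cluster.remote.<alias>.<suffix>` setting names its deprecated `search.remote.<alias>.<suffix>` twin as fallback, derived by rewriting the key prefix (with the `_na_` branch handling the non-concrete affix template). A toy model of the resulting resolution order — a plain map stands in for `Settings`, and all names are invented:

[source,java]
--------------------------------
import java.util.HashMap;
import java.util.Map;

// Sketch: resolving cluster.remote.<alias>.seeds consults
// search.remote.<alias>.seeds when the new key is absent.
final class FallbackSettingsDemo {

    static String get(Map<String, String> settings, String key, String defaultValue) {
        if (settings.containsKey(key)) {
            return settings.get(key);
        }
        // derive the deprecated twin by rewriting the prefix, as the affix setting does
        String fallbackKey = key.replaceAll("^cluster", "search");
        return settings.getOrDefault(fallbackKey, defaultValue);
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("search.remote.cluster_one.seeds", "127.0.0.1:9300");

        // the old key still resolves through the fallback...
        System.out.println(get(settings, "cluster.remote.cluster_one.seeds", ""));

        // ...until the new key is set, which then wins
        settings.put("cluster.remote.cluster_one.seeds", "127.0.0.1:9301");
        System.out.println(get(settings, "cluster.remote.cluster_one.seeds", ""));
    }
}
--------------------------------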
@@ -16,6 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport;

import java.util.Collection;

@@ -64,18 +65,39 @@ import static org.elasticsearch.common.settings.Setting.boolSetting;
 */
public final class RemoteClusterService extends RemoteClusterAware implements Closeable {

    public static final Setting<Integer> SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER =
        Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated);

    /**
     * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single
     * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3.
     */
    public static final Setting<Integer> REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster",
        3, 1, Setting.Property.NodeScope);
    public static final Setting<Integer> REMOTE_CONNECTIONS_PER_CLUSTER =
        Setting.intSetting(
            "cluster.remote.connections_per_cluster",
            SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, // the default needs to be three when fallback is removed
            1,
            Setting.Property.NodeScope);

    public static final Setting<TimeValue> SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING =
        Setting.positiveTimeSetting(
            "search.remote.initial_connect_timeout",
            TimeValue.timeValueSeconds(30),
            Setting.Property.NodeScope,
            Setting.Property.Deprecated);

    /**
     * The initial connect timeout for remote cluster connections
     */
    public static final Setting<TimeValue> REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING =
        Setting.positiveTimeSetting("search.remote.initial_connect_timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope);
        Setting.positiveTimeSetting(
            "cluster.remote.initial_connect_timeout",
            SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, // the default needs to be thirty seconds when fallback is removed
            TimeValue.timeValueSeconds(30),
            Setting.Property.NodeScope);

    public static final Setting<String> SEARCH_REMOTE_NODE_ATTRIBUTE =
        Setting.simpleString("search.remote.node.attr", Setting.Property.NodeScope, Setting.Property.Deprecated);

    /**
     * The name of a node attribute to select nodes that should be connected to in the remote cluster.

@@ -83,20 +105,46 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
     * clusters. In that case {@code search.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster.
     * The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise.
     */
    public static final Setting<String> REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr",
        Setting.Property.NodeScope);
    public static final Setting<String> REMOTE_NODE_ATTRIBUTE =
        Setting.simpleString(
            "cluster.remote.node.attr",
            SEARCH_REMOTE_NODE_ATTRIBUTE, // no default is needed when fallback is removed, use simple string which gives empty
            Setting.Property.NodeScope);

    public static final Setting<Boolean> SEARCH_ENABLE_REMOTE_CLUSTERS =
        Setting.boolSetting("search.remote.connect", true, Setting.Property.NodeScope, Setting.Property.Deprecated);

    /**
     * If <code>true</code> connecting to remote clusters is supported on this node. If <code>false</code> this node will not establish
     * connections to any remote clusters configured. Search requests executed against this node (where this node is the coordinating node)
     * will fail if remote cluster syntax is used as an index pattern. The default is <code>true</code>
     */
    public static final Setting<Boolean> ENABLE_REMOTE_CLUSTERS = Setting.boolSetting("search.remote.connect", true,
        Setting.Property.NodeScope);
    public static final Setting<Boolean> ENABLE_REMOTE_CLUSTERS =
        Setting.boolSetting(
            "cluster.remote.connect",
            SEARCH_ENABLE_REMOTE_CLUSTERS, // the default needs to be true when fallback is removed
            Setting.Property.NodeScope);

    public static final Setting.AffixSetting<Boolean> SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE =
        Setting.affixKeySetting(
            "search.remote.",
            "skip_unavailable",
            key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope),
            REMOTE_CLUSTERS_SEEDS);

    public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE =
        Setting.affixKeySetting("search.remote.", "skip_unavailable",
            key -> boolSetting(key, false, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS);
        Setting.affixKeySetting(
            "cluster.remote.",
            "skip_unavailable",
            key -> boolSetting(
                key,
                // the default needs to be false when fallback is removed
                "_na_".equals(key)
                    ? SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(key)
                    : SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSetting(key.replaceAll("^cluster", "search")),
                Setting.Property.Dynamic,
                Setting.Property.NodeScope),
            REMOTE_CLUSTERS_SEEDS);

    private static final Predicate<DiscoveryNode> DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion())
        && (node.isMasterNode() == false || node.isDataNode() || node.isIngestNode());

@@ -144,27 +192,27 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl

                if (remote == null) { // this is a new cluster we have to add a new representation
                    remote = new RemoteClusterConnection(settings, entry.getKey(), seedList, transportService,
                        new ConnectionManager(settings, transportService.transport, transportService.threadPool), numRemoteConnections,
                        getNodePredicate(settings), proxyAddress);
                        new ConnectionManager(settings, transportService.transport, transportService.threadPool), numRemoteConnections,
                        getNodePredicate(settings), proxyAddress);
                    remoteClusters.put(entry.getKey(), remote);
                }

                // now update the seed nodes no matter if it's new or already existing
                RemoteClusterConnection finalRemote = remote;
                remote.updateSeedNodes(proxyAddress, seedList, ActionListener.wrap(
                    response -> {
                        if (countDown.countDown()) {
                            connectionListener.onResponse(response);
                        }
                    },
                    exception -> {
                        if (countDown.fastForward()) {
                            connectionListener.onFailure(exception);
                        }
                        if (finalRemote.isClosed() == false) {
                            logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception);
                        }
                    }));
                    response -> {
                        if (countDown.countDown()) {
                            connectionListener.onResponse(response);
                        }
                    },
                    exception -> {
                        if (countDown.fastForward()) {
                            connectionListener.onFailure(exception);
                        }
                        if (finalRemote.isClosed() == false) {
                            logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception);
                        }
                    }));
            }
        }
        this.remoteClusters = Collections.unmodifiableMap(remoteClusters);

@@ -198,7 +246,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
            String clusterAlias = entry.getKey();
            List<String> originalIndices = entry.getValue();
            originalIndicesMap.put(clusterAlias,
                new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions));
                new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions));
        }
        if (originalIndicesMap.containsKey(LOCAL_CLUSTER_GROUP_KEY) == false) {
            originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions));

@@ -230,38 +278,38 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
            }
            final String[] indices = entry.getValue().indices();
            ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices)
                .indicesOptions(indicesOptions).local(true).preference(preference)
                .routing(routing);
                .indicesOptions(indicesOptions).local(true).preference(preference)
                .routing(routing);
            remoteClusterConnection.fetchSearchShards(searchShardsRequest,
                new ActionListener<ClusterSearchShardsResponse>() {
                    @Override
                    public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) {
                        searchShardsResponses.put(clusterName, clusterSearchShardsResponse);
                        if (responsesCountDown.countDown()) {
                            RemoteTransportException exception = transportException.get();
                            if (exception == null) {
                                listener.onResponse(searchShardsResponses);
                            } else {
                                listener.onFailure(transportException.get());
                new ActionListener<ClusterSearchShardsResponse>() {
                    @Override
                    public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) {
                        searchShardsResponses.put(clusterName, clusterSearchShardsResponse);
                        if (responsesCountDown.countDown()) {
                            RemoteTransportException exception = transportException.get();
                            if (exception == null) {
                                listener.onResponse(searchShardsResponses);
                            } else {
                                listener.onFailure(transportException.get());
                            }
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        RemoteTransportException exception = new RemoteTransportException("error while communicating with remote cluster ["
                            + clusterName + "]", e);
                        if (transportException.compareAndSet(null, exception) == false) {
                            exception = transportException.accumulateAndGet(exception, (previous, current) -> {
                                current.addSuppressed(previous);
                                return current;
                            });
                    @Override
                    public void onFailure(Exception e) {
                        RemoteTransportException exception =
                            new RemoteTransportException("error while communicating with remote cluster [" + clusterName + "]", e);
                        if (transportException.compareAndSet(null, exception) == false) {
                            exception = transportException.accumulateAndGet(exception, (previous, current) -> {
                                current.addSuppressed(previous);
                                return current;
                            });
                        }
                        if (responsesCountDown.countDown()) {
                            listener.onFailure(exception);
                        }
                    }
                        if (responsesCountDown.countDown()) {
                            listener.onFailure(exception);
                        }
                    }
                });
                });
        }
    }

@@ -306,6 +354,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
    public void listenForUpdates(ClusterSettings clusterSettings) {
        super.listenForUpdates(clusterSettings);
        clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {});
        clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {});
    }

    synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) {

@@ -327,7 +376,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
            final String proxyAddress,
            final ActionListener<Void> connectionListener) {
        final List<Supplier<DiscoveryNode>> nodes = addresses.stream().<Supplier<DiscoveryNode>>map(address -> () ->
            buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress))
            buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress))
        ).collect(Collectors.toList());
        updateRemoteClusters(Collections.singletonMap(clusterAlias, new Tuple<>(proxyAddress, nodes)), connectionListener);
    }

@@ -387,7 +436,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
            }
            if (countDown.countDown()) {
                listener.onResponse((clusterAlias, nodeId)
                    -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId));
                    -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId));
            }
        }

@@ -418,4 +467,5 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
    Collection<RemoteClusterConnection> getConnections() {
        return remoteClusters.values();
    }

}
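One line worth pausing on is `DEFAULT_NODE_PREDICATE`: a remote node is usable as a gateway only if its version is wire-compatible and it is not a dedicated master node. A self-contained restatement of that predicate — the simplified `Node` type below is invented, with a boolean standing in for the version-compatibility check:

[source,java]
--------------------------------
import java.util.function.Predicate;

// Sketch of the gateway-node eligibility rule from the hunk above.
final class GatewayPredicateDemo {

    static final class Node {
        final boolean master, data, ingest, compatible;

        Node(boolean master, boolean data, boolean ingest, boolean compatible) {
            this.master = master;
            this.data = data;
            this.ingest = ingest;
            this.compatible = compatible;
        }
    }

    static final Predicate<Node> DEFAULT_NODE_PREDICATE =
        node -> node.compatible && (node.master == false || node.data || node.ingest);

    public static void main(String[] args) {
        System.out.println(DEFAULT_NODE_PREDICATE.test(new Node(true, false, false, true)));  // false: dedicated master
        System.out.println(DEFAULT_NODE_PREDICATE.test(new Node(true, true, false, true)));   // true: master that also holds data
        System.out.println(DEFAULT_NODE_PREDICATE.test(new Node(false, true, true, false)));  // false: incompatible version
    }
}
--------------------------------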
@@ -207,6 +207,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
    private final MeanMetric transmittedBytesMetric = new MeanMetric();
    private volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
    private final ResponseHandlers responseHandlers = new ResponseHandlers();
    private final TransportLogger transportLogger;
    private final BytesReference pingMessage;

    public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays,

@@ -221,6 +222,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
        this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);
        this.networkService = networkService;
        this.transportName = transportName;
        this.transportLogger = new TransportLogger(settings);

        final Settings defaultFeatures = DEFAULT_FEATURES_SETTING.get(settings);
        if (defaultFeatures == null) {
            this.features = new String[0];

@@ -788,7 +791,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
            // in case we are able to return data, serialize the exception content and send it back to the client
            if (channel.isOpen()) {
                BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8));
                final SendMetricListener closeChannel = new SendMetricListener(message.length()) {
                final SendMetricListener listener = new SendMetricListener(message.length()) {
                    @Override
                    protected void innerInnerOnResponse(Void v) {
                        CloseableChannel.closeChannel(channel);

@@ -800,7 +803,14 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
                        CloseableChannel.closeChannel(channel);
                    }
                };
                internalSendMessage(channel, message, closeChannel);
                // We do not call internalSendMessage because we are not sending a message that is an
                // elasticsearch binary message. We are just serializing an exception here, not formatting it
                // as an elasticsearch transport message.
                try {
                    channel.sendMessage(message, listener);
                } catch (Exception ex) {
                    listener.onFailure(ex);
                }
            }
        } else {
            logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);

@@ -906,6 +916,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
     * sends a message to the given channel, using the given callbacks.
     */
    private void internalSendMessage(TcpChannel channel, BytesReference message, SendMetricListener listener) {
        transportLogger.logOutboundMessage(channel, message);
        try {
            channel.sendMessage(message, listener);
        } catch (Exception ex) {

@@ -1050,6 +1061,24 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
        return new CompositeBytesReference(header, messageBody, zeroCopyBuffer);
    }

    /**
     * Handles inbound message that has been decoded.
     *
     * @param channel the channel the message is from
     * @param message the message
     */
    public void inboundMessage(TcpChannel channel, BytesReference message) {
        try {
            transportLogger.logInboundMessage(channel, message);
            // Message length of 0 is a ping
            if (message.length() != 0) {
                messageReceived(message, channel);
            }
        } catch (Exception e) {
            onException(channel, e);
        }
    }

    /**
     * Consumes bytes that are available from network reads. This method returns the number of bytes consumed
     * in this call.

@@ -1067,15 +1096,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements

        if (message == null) {
            return 0;
        } else if (message.length() == 0) {
            // This is a ping and should not be handled.
            return BYTES_NEEDED_FOR_MESSAGE_SIZE;
        } else {
            try {
                messageReceived(message, channel);
            } catch (Exception e) {
                onException(channel, e);
            }
            inboundMessage(channel, message);
            return message.length() + BYTES_NEEDED_FOR_MESSAGE_SIZE;
        }
    }

@@ -1091,7 +1113,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
     * @throws IllegalArgumentException if the message length is greater than the maximum allowed frame size.
     * This is dependent on the available memory.
     */
    public static BytesReference decodeFrame(BytesReference networkBytes) throws IOException {
    static BytesReference decodeFrame(BytesReference networkBytes) throws IOException {
        int messageLength = readMessageLength(networkBytes);
        if (messageLength == -1) {
            return null;
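The new `inboundMessage` centralizes the rule that every decoded frame is trace-logged, but a zero-length body — a ping — is never dispatched to `messageReceived`. Reduced to the dispatch decision alone (byte arrays stand in for `BytesReference`, and wire-level header parsing is elided; all names are invented):

[source,java]
--------------------------------
// Sketch of the inbound dispatch rule made explicit in the hunk above.
final class InboundMessageDemo {

    interface Handler {
        void messageReceived(byte[] message);
    }

    static void inboundMessage(byte[] message, Handler handler) {
        System.out.println("READ: " + message.length + "B"); // trace-log every frame
        if (message.length != 0) {                           // a zero-length body is a ping
            handler.messageReceived(message);
        }
    }

    public static void main(String[] args) {
        Handler handler = m -> System.out.println("dispatched " + m.length + "B");
        inboundMessage(new byte[0], handler);  // ping: logged, not dispatched
        inboundMessage(new byte[42], handler); // regular message: dispatched
    }
}
--------------------------------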
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.transport;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.NotCompressedException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.internal.io.IOUtils;

import java.io.IOException;

public final class TransportLogger {

    private final Logger logger;
    private static final int HEADER_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;

    TransportLogger(Settings settings) {
        logger = Loggers.getLogger(TransportLogger.class, settings);
    }

    void logInboundMessage(TcpChannel channel, BytesReference message) {
        if (logger.isTraceEnabled()) {
            try {
                String logMessage = format(channel, message, "READ");
                logger.trace(logMessage);
            } catch (IOException e) {
                logger.trace("an exception occurred formatting a READ trace message", e);
            }
        }
    }

    void logOutboundMessage(TcpChannel channel, BytesReference message) {
        if (logger.isTraceEnabled()) {
            try {
                BytesReference withoutHeader = message.slice(HEADER_SIZE, message.length() - HEADER_SIZE);
                String logMessage = format(channel, withoutHeader, "WRITE");
                logger.trace(logMessage);
            } catch (IOException e) {
                logger.trace("an exception occurred formatting a WRITE trace message", e);
            }
        }
    }

    private String format(TcpChannel channel, BytesReference message, String event) throws IOException {
        final StringBuilder sb = new StringBuilder();
        sb.append(channel);
        int messageLengthWithHeader = HEADER_SIZE + message.length();
        // This is a ping
        if (message.length() == 0) {
            sb.append(" [ping]").append(' ').append(event).append(": ").append(messageLengthWithHeader).append('B');
        } else {
            boolean success = false;
            StreamInput streamInput = message.streamInput();
            try {
                final long requestId = streamInput.readLong();
                final byte status = streamInput.readByte();
                final boolean isRequest = TransportStatus.isRequest(status);
                final String type = isRequest ? "request" : "response";
                final String version = Version.fromId(streamInput.readInt()).toString();
                sb.append(" [length: ").append(messageLengthWithHeader);
                sb.append(", request id: ").append(requestId);
                sb.append(", type: ").append(type);
                sb.append(", version: ").append(version);

                if (isRequest) {
                    if (TransportStatus.isCompress(status)) {
                        Compressor compressor;
                        try {
                            final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE;
                            compressor = CompressorFactory.compressor(message.slice(bytesConsumed, message.length() - bytesConsumed));
                        } catch (NotCompressedException ex) {
                            throw new IllegalStateException(ex);
                        }
                        streamInput = compressor.streamInput(streamInput);
                    }

                    try (ThreadContext context = new ThreadContext(Settings.EMPTY)) {
                        context.readHeaders(streamInput);
                    }
                    // now we decode the features
                    if (streamInput.getVersion().onOrAfter(Version.V_6_3_0)) {
                        streamInput.readStringArray();
                    }
                    sb.append(", action: ").append(streamInput.readString());
                }
                sb.append(']');
                sb.append(' ').append(event).append(": ").append(messageLengthWithHeader).append('B');
                success = true;
            } finally {
                if (success) {
                    IOUtils.close(streamInput);
                } else {
                    IOUtils.closeWhileHandlingException(streamInput);
                }
            }
        }
        return sb.toString();
    }
}
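The TransportLogger above formats and emits a line only when TRACE is enabled for
org.elasticsearch.transport.TransportLogger; otherwise both log methods return immediately.
A minimal sketch of driving it, assuming transport-package visibility (the `channel` and
message variables below are illustrative placeholders, not values from this commit):

    // A sketch, not part of the commit: exercising the logger directly.
    TransportLogger transportLogger = new TransportLogger(Settings.EMPTY);
    // Inbound callers pass the message with the 6-byte marker+length header already stripped...
    transportLogger.logInboundMessage(channel, messageWithoutHeader);   // traces "... READ: <n>B"
    // ...while outbound callers pass the full message; logOutboundMessage slices the header off itself.
    transportLogger.logOutboundMessage(channel, fullMessageWithHeader); // traces "... WRITE: <n>B"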
@@ -60,7 +60,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
        final boolean shard2 = randomBoolean();

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,

@@ -119,7 +119,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
        lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
        final boolean shard1 = randomBoolean();
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,

@@ -186,7 +186,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {


        final SearchTransportService searchTransportService =
            new SearchTransportService(Settings.builder().put("search.remote.connect", false).build(), null, null) {
            new SearchTransportService(Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
                @Override
                public void sendCanMatch(
                    Transport.Connection connection,
@@ -60,7 +60,7 @@ public class DfsQueryPhaseTests extends ESTestCase {
        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
            (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,

@@ -118,7 +118,7 @@ public class DfsQueryPhaseTests extends ESTestCase {
        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
            (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,

@@ -175,7 +175,7 @@ public class DfsQueryPhaseTests extends ESTestCase {
        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
            (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,
@@ -70,7 +70,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
                .collect(Collectors.toList()))));
        mockSearchPhaseContext.getRequest().source().query(originalQuery);
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -145,7 +145,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
        mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder()
            .collapse(new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz"))));
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -187,7 +187,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
    public void testSkipPhase() throws IOException {
        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -218,7 +218,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
    public void testSkipExpandCollapseNoHits() throws IOException {
        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -250,7 +250,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
        boolean version = randomBoolean();

        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {
@@ -106,7 +106,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -161,7 +161,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -215,7 +215,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
            results.consumeResult(queryResult);
        }
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -277,7 +277,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);
        AtomicInteger numFetches = new AtomicInteger(0);
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -331,7 +331,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {
@@ -286,7 +286,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
                .get();
            fail("bogus value");
        } catch (IllegalArgumentException ex) {
            assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s");
            assertEquals(ex.getMessage(), "failed to parse value [-1] for setting [discovery.zen.publish_timeout], must be >= [0ms]");
        }

        assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
@@ -318,7 +318,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
            createIndex("test", settings);
            fail();
        } catch (IllegalArgumentException ex) {
            assertEquals("Failed to parse value [0ms] for setting [index.translog.sync_interval] must be >= 100ms", ex.getMessage());
            assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage());
        }
    }
}
@@ -5020,6 +5020,11 @@ public class InternalEngineTests extends EngineTestCase {
        assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint()));
    }

    public void testAcquireSearcherOnClosingEngine() throws Exception {
        engine.close();
        expectThrows(AlreadyClosedException.class, () -> engine.acquireSearcher("test"));
    }

    private static void trimUnsafeCommits(EngineConfig config) throws IOException {
        final Store store = config.getStore();
        final TranslogConfig translogConfig = config.getTranslogConfig();
@@ -19,14 +19,12 @@

package org.elasticsearch.index.engine;

import java.nio.file.Path;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.SnapshotMatchers;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.test.IndexSettingsModule;

@@ -202,7 +200,7 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
        CountDownLatch readyLatch = new CountDownLatch(followers.length + 1);
        AtomicBoolean isDone = new AtomicBoolean();
        for (int i = 0; i < followers.length; i++) {
            followers[i] = new Follower(engine, isDone, readyLatch, createTempDir());
            followers[i] = new Follower(engine, isDone, readyLatch);
            followers[i].start();
        }
        boolean onPrimary = randomBoolean();

@@ -228,28 +226,30 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
            operations.add(op);
        }
        readyLatch.countDown();
        readyLatch.await();
        concurrentlyApplyOps(operations, engine);
        assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(operations.size() - 1L));
        isDone.set(true);
        for (Follower follower : followers) {
            follower.join();
            IOUtils.close(follower.engine, follower.engine.store);
        }
    }

    class Follower extends Thread {
        private final Engine leader;
        private final InternalEngine engine;
        private final TranslogHandler translogHandler;
        private final AtomicBoolean isDone;
        private final CountDownLatch readLatch;
        private final Path translogPath;

        Follower(Engine leader, AtomicBoolean isDone, CountDownLatch readLatch, Path translogPath) {
        Follower(Engine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException {
            this.leader = leader;
            this.isDone = isDone;
            this.readLatch = readLatch;
            this.translogHandler = new TranslogHandler(xContentRegistry(), IndexSettingsModule.newIndexSettings(shardId.getIndexName(),
                engine.engineConfig.getIndexSettings().getSettings()));
            this.translogPath = translogPath;
                leader.engineConfig.getIndexSettings().getSettings()));
            this.engine = createEngine(createStore(), createTempDir());
        }

        void pullOperations(Engine follower) throws IOException {

@@ -267,16 +267,15 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {

        @Override
        public void run() {
            try (Store store = createStore();
                 InternalEngine follower = createEngine(store, translogPath)) {
            try {
                readLatch.countDown();
                readLatch.await();
                while (isDone.get() == false ||
                    follower.getLocalCheckpointTracker().getCheckpoint() < leader.getLocalCheckpoint()) {
                    pullOperations(follower);
                    engine.getLocalCheckpointTracker().getCheckpoint() < leader.getLocalCheckpoint()) {
                    pullOperations(engine);
                }
                assertConsistentHistoryBetweenTranslogAndLuceneIndex(follower, mapperService);
                assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true)));
                assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
                assertThat(getDocIds(engine, true), equalTo(getDocIds(leader, true)));
            } catch (Exception ex) {
                throw new AssertionError(ex);
            }
@@ -125,7 +125,6 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
        closeShards(shard);
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33330")
    public void testSyncerOnClosingShard() throws Exception {
        IndexShard shard = newStartedShard(true);
        AtomicBoolean syncActionCalled = new AtomicBoolean();
@@ -258,7 +258,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
        Exception e = expectThrows(IllegalArgumentException.class,
            () -> new MockController(Settings.builder()
                .put("indices.memory.interval", "-42s").build()));
        assertEquals("Failed to parse value [-42s] for setting [indices.memory.interval] must be >= 0s", e.getMessage());
        assertEquals("failed to parse value [-42s] for setting [indices.memory.interval], must be >= [0ms]", e.getMessage());

    }

@@ -266,7 +266,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
        Exception e = expectThrows(IllegalArgumentException.class,
            () -> new MockController(Settings.builder()
                .put("indices.memory.shard_inactive_time", "-42s").build()));
        assertEquals("Failed to parse value [-42s] for setting [indices.memory.shard_inactive_time] must be >= 0s", e.getMessage());
        assertEquals("failed to parse value [-42s] for setting [indices.memory.shard_inactive_time], must be >= [0ms]", e.getMessage());

    }
@@ -156,6 +156,25 @@ public class NodeTests extends ESTestCase {
        }
    }

    public void testServerNameNodeAttribute() throws IOException {
        String attr = "valid-hostname";
        Settings.Builder settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "server_name", attr);
        int i = 0;
        try (Node node = new MockNode(settings.build(), basePlugins())) {
            final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();
            assertEquals(attr, Node.NODE_ATTRIBUTES.getAsMap(nodeSettings).get("server_name"));
        }

        // non-LDH hostname not allowed
        attr = "invalid_hostname";
        settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "server_name", attr);
        try (Node node = new MockNode(settings.build(), basePlugins())) {
            fail("should not allow a server_name attribute with an underscore");
        } catch (IllegalArgumentException e) {
            assertEquals("invalid node.attr.server_name [invalid_hostname]", e.getMessage());
        }
    }

    private static Settings.Builder baseSettings() {
        final Path tempDir = createTempDir();
        return Settings.builder()
@@ -52,7 +52,7 @@ public class RemoteClusterClientTests extends ESTestCase {

        Settings localSettings = Settings.builder()
            .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true)
            .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build();
            .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build();
        try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) {
            service.start();
            service.acceptIncomingRequests();

@@ -77,7 +77,7 @@ public class RemoteClusterClientTests extends ESTestCase {
        DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode();
        Settings localSettings = Settings.builder()
            .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true)
            .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build();
            .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build();
        try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) {
            Semaphore semaphore = new Semaphore(1);
            service.start();
@@ -98,17 +98,17 @@ public class RemoteClusterServiceTests extends ESTestCase {
    public void testRemoteClusterSeedSetting() {
        // simple validation
        Settings settings = Settings.builder()
            .put("search.remote.foo.seeds", "192.168.0.1:8080")
            .put("search.remote.bar.seed", "[::1]:9090").build();
            .put("cluster.remote.foo.seeds", "192.168.0.1:8080")
            .put("cluster.remote.bar.seed", "[::1]:9090").build();
        RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings).forEach(setting -> setting.get(settings));

        Settings brokenSettings = Settings.builder()
            .put("search.remote.foo.seeds", "192.168.0.1").build();
            .put("cluster.remote.foo.seeds", "192.168.0.1").build();
        expectThrows(IllegalArgumentException.class, () ->
            RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));

        Settings brokenPortSettings = Settings.builder()
            .put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build();
            .put("cluster.remote.foo.seeds", "192.168.0.1:123456789123456789").build();
        Exception e = expectThrows(
            IllegalArgumentException.class,
            () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings)

@@ -119,10 +119,10 @@ public class RemoteClusterServiceTests extends ESTestCase {

    public void testBuildRemoteClustersDynamicConfig() throws Exception {
        Map<String, Tuple<String, List<Supplier<DiscoveryNode>>>> map = RemoteClusterService.buildRemoteClustersDynamicConfig(
            Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080")
                .put("search.remote.bar.seeds", "[::1]:9090")
                .put("search.remote.boom.seeds", "boom-node1.internal:1000")
                .put("search.remote.boom.proxy", "foo.bar.com:1234").build());
            Settings.builder().put("cluster.remote.foo.seeds", "192.168.0.1:8080")
                .put("cluster.remote.bar.seeds", "[::1]:9090")
                .put("cluster.remote.boom.seeds", "boom-node1.internal:1000")
                .put("cluster.remote.boom.proxy", "foo.bar.com:1234").build());
        assertEquals(3, map.size());
        assertTrue(map.containsKey("foo"));
        assertTrue(map.containsKey("bar"));

@@ -167,8 +167,8 @@ public class RemoteClusterServiceTests extends ESTestCase {
            transportService.start();
            transportService.acceptIncomingRequests();
            Settings.Builder builder = Settings.builder();
            builder.putList("search.remote.cluster_1.seeds", seedNode.getAddress().toString());
            builder.putList("search.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
            builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString());
            builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
            try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) {
                assertFalse(service.isCrossClusterSearchEnabled());
                service.initializeRemoteClusters();

@@ -213,8 +213,8 @@ public class RemoteClusterServiceTests extends ESTestCase {
            transportService.start();
            transportService.acceptIncomingRequests();
            Settings.Builder builder = Settings.builder();
            builder.putList("search.remote.cluster_1.seeds", seedNode.getAddress().toString());
            builder.putList("search.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
            builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString());
            builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
            try (RemoteClusterService service = new RemoteClusterService(Settings.EMPTY, transportService)) {
                assertFalse(service.isCrossClusterSearchEnabled());
                service.initializeRemoteClusters();

@@ -238,7 +238,7 @@ public class RemoteClusterServiceTests extends ESTestCase {

    public void testRemoteNodeAttribute() throws IOException, InterruptedException {
        final Settings settings =
            Settings.builder().put("search.remote.node.attr", "gateway").build();
            Settings.builder().put("cluster.remote.node.attr", "gateway").build();
        final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
        final Settings gateway = Settings.builder().put("node.attr.gateway", true).build();
        try (MockTransportService c1N1 =

@@ -268,9 +268,9 @@ public class RemoteClusterServiceTests extends ESTestCase {
                    transportService.acceptIncomingRequests();
                    final Settings.Builder builder = Settings.builder();
                    builder.putList(
                        "search.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
                        "cluster.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
                    builder.putList(
                        "search.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
                        "cluster.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
                    try (RemoteClusterService service =
                             new RemoteClusterService(settings, transportService)) {
                        assertFalse(service.isCrossClusterSearchEnabled());

@@ -335,8 +335,8 @@ public class RemoteClusterServiceTests extends ESTestCase {
                    transportService.start();
                    transportService.acceptIncomingRequests();
                    final Settings.Builder builder = Settings.builder();
                    builder.putList("search.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
                    builder.putList("search.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
                    builder.putList("cluster.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
                    builder.putList("cluster.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
                    try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) {
                        assertFalse(service.isCrossClusterSearchEnabled());
                        service.initializeRemoteClusters();

@@ -406,9 +406,9 @@ public class RemoteClusterServiceTests extends ESTestCase {
                    transportService.acceptIncomingRequests();
                    final Settings.Builder builder = Settings.builder();
                    builder.putList(
                        "search.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
                        "cluster.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
                    builder.putList(
                        "search.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
                        "cluster.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
                    try (RemoteClusterService service =
                             new RemoteClusterService(settings, transportService)) {
                        assertFalse(service.isCrossClusterSearchEnabled());

@@ -540,7 +540,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
                DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode();
                knownNodes.add(remoteSeedNode);
                nodes[i] = remoteSeedNode;
                builder.put("search.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString());
                builder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString());
                remoteIndicesByCluster.put("remote" + i, new OriginalIndices(new String[]{"index"}, IndicesOptions.lenientExpandOpen()));
            }
            Settings settings = builder.build();

@@ -696,13 +696,13 @@ public class RemoteClusterServiceTests extends ESTestCase {
    public void testRemoteClusterSkipIfDisconnectedSetting() {
        {
            Settings settings = Settings.builder()
                .put("search.remote.foo.skip_unavailable", true)
                .put("search.remote.bar.skip_unavailable", false).build();
                .put("cluster.remote.foo.skip_unavailable", true)
                .put("cluster.remote.bar.skip_unavailable", false).build();
            RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getAllConcreteSettings(settings).forEach(setting -> setting.get(settings));
        }
        {
            Settings brokenSettings = Settings.builder()
                .put("search.remote.foo.skip_unavailable", "broken").build();
                .put("cluster.remote.foo.skip_unavailable", "broken").build();
            expectThrows(IllegalArgumentException.class, () ->
                RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getAllConcreteSettings(brokenSettings)
                    .forEach(setting -> setting.get(brokenSettings)));

@@ -712,22 +712,22 @@ public class RemoteClusterServiceTests extends ESTestCase {
            new HashSet<>(Arrays.asList(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
                RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE)));
        {
            Settings settings = Settings.builder().put("search.remote.foo.skip_unavailable", randomBoolean()).build();
            Settings settings = Settings.builder().put("cluster.remote.foo.skip_unavailable", randomBoolean()).build();
            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> service.validate(settings, true));
            assertEquals("Missing required setting [search.remote.foo.seeds] for setting [search.remote.foo.skip_unavailable]",
            assertEquals("Missing required setting [cluster.remote.foo.seeds] for setting [cluster.remote.foo.skip_unavailable]",
                iae.getMessage());
        }
        {
            try (MockTransportService remoteSeedTransport = startTransport("seed", new CopyOnWriteArrayList<>(), Version.CURRENT)) {
                String seed = remoteSeedTransport.getLocalDiscoNode().getAddress().toString();
                service.validate(Settings.builder().put("search.remote.foo.skip_unavailable", randomBoolean())
                    .put("search.remote.foo.seeds", seed).build(), true);
                service.validate(Settings.builder().put("search.remote.foo.seeds", seed).build(), true);
                service.validate(Settings.builder().put("cluster.remote.foo.skip_unavailable", randomBoolean())
                    .put("cluster.remote.foo.seeds", seed).build(), true);
                service.validate(Settings.builder().put("cluster.remote.foo.seeds", seed).build(), true);

                AbstractScopedSettings service2 = new ClusterSettings(Settings.builder().put("search.remote.foo.seeds", seed).build(),
                AbstractScopedSettings service2 = new ClusterSettings(Settings.builder().put("cluster.remote.foo.seeds", seed).build(),
                    new HashSet<>(Arrays.asList(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
                        RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE)));
                service2.validate(Settings.builder().put("search.remote.foo.skip_unavailable", randomBoolean()).build(), false);
                service2.validate(Settings.builder().put("cluster.remote.foo.skip_unavailable", randomBoolean()).build(), false);
            }
        }
    }

@@ -789,7 +789,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
    public void testGetNodePredicateNodeAttrs() {
        TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
        Set<DiscoveryNode.Role> roles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
        Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build();
        Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build();
        Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(settings);
        {
            DiscoveryNode nonGatewayNode = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),

@@ -812,7 +812,7 @@ public class RemoteClusterServiceTests extends ESTestCase {

    public void testGetNodePredicatesCombination() {
        TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
        Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build();
        Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build();
        Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(settings);
        Set<DiscoveryNode.Role> allRoles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
        Set<DiscoveryNode.Role> dedicatedMasterRoles = new HashSet<>(EnumSet.of(DiscoveryNode.Role.MASTER));

@@ -861,8 +861,8 @@ public class RemoteClusterServiceTests extends ESTestCase {
            transportService.start();
            transportService.acceptIncomingRequests();
            Settings.Builder builder = Settings.builder();
            builder.putList("search.remote.cluster_1.seeds", "cluster_1_node0:8080");
            builder.put("search.remote.cluster_1.proxy", cluster1Proxy);
            builder.putList("cluster.remote.cluster_1.seeds", "cluster_1_node0:8080");
            builder.put("cluster.remote.cluster_1.proxy", cluster1Proxy);
            try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) {
                assertFalse(service.isCrossClusterSearchEnabled());
                service.initializeRemoteClusters();
@@ -0,0 +1,146 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_PROXY;
import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_SEEDS;
import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY;
import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS;
import static org.elasticsearch.transport.RemoteClusterService.ENABLE_REMOTE_CLUSTERS;
import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE;
import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER;
import static org.elasticsearch.transport.RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING;
import static org.elasticsearch.transport.RemoteClusterService.REMOTE_NODE_ATTRIBUTE;
import static org.elasticsearch.transport.RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS;
import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE;
import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER;
import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING;
import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.equalTo;

public class RemoteClusterSettingsTests extends ESTestCase {

    public void testConnectionsPerClusterFallback() {
        final int value = randomIntBetween(1, 8);
        final Settings settings = Settings.builder().put(SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER.getKey(), value).build();
        assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(settings), equalTo(value));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER});
    }

    public void testConnectionsPerClusterDefault() {
        assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(Settings.EMPTY), equalTo(3));
    }

    public void testInitialConnectTimeoutFallback() {
        final String value = randomTimeValue(30, 300, "s");
        final Settings settings = Settings.builder().put(SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey(), value).build();
        assertThat(
            REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings),
            equalTo(TimeValue.parseTimeValue(value, SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey())));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING});
    }

    public void testInitialConnectTimeoutDefault() {
        assertThat(REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(Settings.EMPTY), equalTo(new TimeValue(30, TimeUnit.SECONDS)));
    }

    public void testRemoteNodeAttributeFallback() {
        final String attribute = randomAlphaOfLength(8);
        final Settings settings = Settings.builder().put(SEARCH_REMOTE_NODE_ATTRIBUTE.getKey(), attribute).build();
        assertThat(REMOTE_NODE_ATTRIBUTE.get(settings), equalTo(attribute));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_NODE_ATTRIBUTE});
    }

    public void testRemoteNodeAttributeDefault() {
        assertThat(REMOTE_NODE_ATTRIBUTE.get(Settings.EMPTY), equalTo(""));
    }

    public void testEnableRemoteClustersFallback() {
        final boolean enable = randomBoolean();
        final Settings settings = Settings.builder().put(SEARCH_ENABLE_REMOTE_CLUSTERS.getKey(), enable).build();
        assertThat(ENABLE_REMOTE_CLUSTERS.get(settings), equalTo(enable));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_ENABLE_REMOTE_CLUSTERS});
    }

    public void testEnableRemoteClustersDefault() {
        assertTrue(ENABLE_REMOTE_CLUSTERS.get(Settings.EMPTY));
    }

    public void testSkipUnavailableFallback() {
        final String alias = randomAlphaOfLength(8);
        final boolean skip = randomBoolean();
        final Settings settings =
            Settings.builder().put(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).getKey(), skip).build();
        assertThat(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(settings), equalTo(skip));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias)});
    }

    public void testSkipUnavailableDefault() {
        final String alias = randomAlphaOfLength(8);
        assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY));
    }

    public void testSeedsFallback() {
        final String alias = randomAlphaOfLength(8);
        final int numberOfSeeds = randomIntBetween(1, 8);
        final List<String> seeds = new ArrayList<>(numberOfSeeds);
        for (int i = 0; i < numberOfSeeds; i++) {
            seeds.add("localhost:" + Integer.toString(9200 + i));
        }
        final Settings settings =
            Settings.builder()
                .put(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).getKey(), String.join(",", seeds)).build();
        assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(settings), equalTo(seeds));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias)});
    }

    public void testSeedsDefault() {
        final String alias = randomAlphaOfLength(8);
        assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), emptyCollectionOf(String.class));
    }

    public void testProxyFallback() {
        final String alias = randomAlphaOfLength(8);
        final String proxy = randomAlphaOfLength(8);
        final int port = randomIntBetween(9200, 9300);
        final String value = proxy + ":" + port;
        final Settings settings =
            Settings.builder()
                .put(SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).getKey(), value).build();
        assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(settings), equalTo(value));
        assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias)});
    }

    public void testProxyDefault() {
        final String alias = randomAlphaOfLength(8);
        assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), equalTo(""));
    }

}
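The fallback tests above rely on each `cluster.remote.*` setting being declared with its
deprecated `search.remote.*` counterpart as a fallback. A sketch of the pattern being
exercised, using the connections-per-cluster pair as the example (the exact declarations
live in RemoteClusterService and RemoteClusterAware; treat the shapes below as assumed,
not copied from this commit):

    // Old key: still readable, but marked Deprecated so using it emits a deprecation warning.
    public static final Setting<Integer> SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER =
        Setting.intSetting("search.remote.connections_per_cluster", 3, 1,
            Setting.Property.NodeScope, Setting.Property.Deprecated);

    // New key: falls back to the old setting (and thus its default of 3) when unset,
    // which is exactly what testConnectionsPerClusterFallback() asserts.
    public static final Setting<Integer> REMOTE_CONNECTIONS_PER_CLUSTER =
        Setting.intSetting("cluster.remote.connections_per_cluster",
            SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, 1, Setting.Property.NodeScope);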
@@ -0,0 +1,116 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.transport;

import org.apache.logging.log4j.Level;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.CompositeBytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;

import java.io.IOException;

import static org.mockito.Mockito.mock;

@TestLogging(value = "org.elasticsearch.transport.TransportLogger:trace")
public class TransportLoggerTests extends ESTestCase {

    private MockLogAppender appender;

    public void setUp() throws Exception {
        super.setUp();
        appender = new MockLogAppender();
        Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender);
        appender.start();
    }

    public void tearDown() throws Exception {
        Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender);
        appender.stop();
        super.tearDown();
    }

    public void testLoggingHandler() throws IOException {
        TransportLogger transportLogger = new TransportLogger(Settings.EMPTY);

        final String writePattern =
            ".*\\[length: \\d+" +
                ", request id: \\d+" +
                ", type: request" +
                ", version: .*" +
                ", action: cluster:monitor/stats]" +
                " WRITE: \\d+B";
        final MockLogAppender.LoggingExpectation writeExpectation =
            new MockLogAppender.PatternSeenEventExcpectation(
                "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern);

        final String readPattern =
            ".*\\[length: \\d+" +
                ", request id: \\d+" +
                ", type: request" +
                ", version: .*" +
                ", action: cluster:monitor/stats]" +
                " READ: \\d+B";

        final MockLogAppender.LoggingExpectation readExpectation =
            new MockLogAppender.PatternSeenEventExcpectation(
                "cluster monitor request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern);

        appender.addExpectation(writeExpectation);
        appender.addExpectation(readExpectation);
        BytesReference bytesReference = buildRequest();
        transportLogger.logInboundMessage(mock(TcpChannel.class), bytesReference.slice(6, bytesReference.length() - 6));
        transportLogger.logOutboundMessage(mock(TcpChannel.class), bytesReference);
        appender.assertAllExpectationsMatched();
    }

    private BytesReference buildRequest() throws IOException {
        try (BytesStreamOutput messageOutput = new BytesStreamOutput()) {
            messageOutput.setVersion(Version.CURRENT);
            try (ThreadContext context = new ThreadContext(Settings.EMPTY)) {
                context.writeTo(messageOutput);
            }
            messageOutput.writeStringArray(new String[0]);
            messageOutput.writeString(ClusterStatsAction.NAME);
            new ClusterStatsRequest().writeTo(messageOutput);
            BytesReference messageBody = messageOutput.bytes();
            final BytesReference header = buildHeader(randomInt(30), messageBody.length());
            return new CompositeBytesReference(header, messageBody);
        }
    }

    private BytesReference buildHeader(long requestId, int length) throws IOException {
        try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) {
            headerOutput.setVersion(Version.CURRENT);
            TcpHeader.writeHeader(headerOutput, requestId, TransportStatus.setRequest((byte) 0), Version.CURRENT, length);
            final BytesReference bytes = headerOutput.bytes();
            assert bytes.length() == TcpHeader.HEADER_SIZE : "header size mismatch expected: " + TcpHeader.HEADER_SIZE + " but was: "
                + bytes.length();
            return bytes;
        }
    }
}
@@ -2686,7 +2686,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
    }

    @SuppressForbidden(reason = "need local ephemeral port")
    private InetSocketAddress getLocalEphemeral() throws UnknownHostException {
    protected InetSocketAddress getLocalEphemeral() throws UnknownHostException {
        return new InetSocketAddress(InetAddress.getLocalHost(), 0);
    }
}
@@ -49,7 +49,7 @@ information about the `xpack.security.enabled` setting, see
PUT _cluster/settings
{
  "persistent": {
    "search": {
    "cluster": {
      "remote": {
        "cluster_one": {
          "seeds": [ "10.0.1.1:9300" ]

@@ -82,7 +82,7 @@ First, enable cluster `one` to perform cross cluster search on remote cluster
PUT _cluster/settings
{
  "persistent": {
    "search.remote.cluster_two.seeds": [ "10.0.2.1:9300" ]
    "cluster.remote.cluster_two.seeds": [ "10.0.2.1:9300" ]
  }
}
-----------------------------------------------------------
@@ -27,7 +27,7 @@ followClusterTestCluster {
    numNodes = 1
    clusterName = 'follow-cluster'
    setting 'xpack.license.self_generated.type', 'trial'
    setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\""
    setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\""
}

followClusterTestRunner {

@@ -44,7 +44,7 @@ followClusterTestCluster {
    dependsOn leaderClusterTestRunner
    numNodes = 1
    clusterName = 'follow-cluster'
    setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\""
    setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\""
    setting 'xpack.license.self_generated.type', 'trial'
    setting 'xpack.security.enabled', 'true'
    setting 'xpack.monitoring.enabled', 'false'

@@ -28,7 +28,7 @@ followClusterTestCluster {
    numNodes = 1
    clusterName = 'follow-cluster'
    setting 'xpack.license.self_generated.type', 'trial'
    setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\""
    setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\""
}

followClusterTestRunner {
@@ -78,6 +78,34 @@ public class FollowIndexIT extends ESRestTestCase {
        }
    }

    public void testAutoFollowPatterns() throws Exception {
        assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster);

        Request request = new Request("PUT", "/_ccr/_auto_follow/leader_cluster");
        request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"]}");
        assertOK(client().performRequest(request));

        try (RestClient leaderClient = buildLeaderClient()) {
            Settings settings = Settings.builder()
                .put("index.soft_deletes.enabled", true)
                .build();
            request = new Request("PUT", "/logs-20190101");
            request.setJsonEntity("{\"settings\": " + Strings.toString(settings) +
                ", \"mappings\": {\"_doc\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}} }");
            assertOK(leaderClient.performRequest(request));

            for (int i = 0; i < 5; i++) {
                String id = Integer.toString(i);
                index(leaderClient, "logs-20190101", id, "field", i, "filtered_field", "true");
            }
        }

        assertBusy(() -> {
            ensureYellow("logs-20190101");
            verifyDocuments("logs-20190101", 5);
        });
    }

    private static void index(RestClient client, String index, String id, Object... fields) throws IOException {
        XContentBuilder document = jsonBuilder().startObject();
        for (int i = 0; i < fields.length; i += 2) {

@@ -135,6 +163,15 @@ public class FollowIndexIT extends ESRestTestCase {
        return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false);
    }

    private static void ensureYellow(String index) throws IOException {
        Request request = new Request("GET", "/_cluster/health/" + index);
        request.addParameter("wait_for_status", "yellow");
        request.addParameter("wait_for_no_relocating_shards", "true");
        request.addParameter("timeout", "70s");
        request.addParameter("level", "shards");
        client().performRequest(request);
    }

    private RestClient buildLeaderClient() throws IOException {
        assert runningAgainstLeaderCluster == false;
        String leaderUrl = System.getProperty("tests.leader_host");
@@ -39,21 +39,28 @@ import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.FixedExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator;
import org.elasticsearch.xpack.ccr.action.CcrStatsAction;
import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction;
import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.FollowIndexAction;
import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.ShardChangesAction;
import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask;
import org.elasticsearch.xpack.ccr.action.ShardFollowTask;
import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor;
import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction;
import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction;
import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction;
import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction;
import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory;
import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction;
import org.elasticsearch.xpack.ccr.rest.RestCreateAndFollowIndexAction;
import org.elasticsearch.xpack.ccr.rest.RestDeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.rest.RestFollowIndexAction;
import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.rest.RestUnfollowIndexAction;
import org.elasticsearch.xpack.core.XPackPlugin;

@@ -113,7 +120,14 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
            final Environment environment,
            final NodeEnvironment nodeEnvironment,
            final NamedWriteableRegistry namedWriteableRegistry) {
        return Collections.singleton(ccrLicenseChecker);
        if (enabled == false) {
            return emptyList();
        }

        return Arrays.asList(
            ccrLicenseChecker,
            new AutoFollowCoordinator(settings, client, threadPool, clusterService)
        );
    }

    @Override

@@ -128,12 +142,18 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
        }

        return Arrays.asList(
                // internal actions
                new ActionHandler<>(BulkShardOperationsAction.INSTANCE, TransportBulkShardOperationsAction.class),
                new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class),
                // stats action
                new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class),
                // follow actions
                new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, CreateAndFollowIndexAction.TransportAction.class),
                new ActionHandler<>(FollowIndexAction.INSTANCE, FollowIndexAction.TransportAction.class),
                new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class),
                new ActionHandler<>(UnfollowIndexAction.INSTANCE, UnfollowIndexAction.TransportAction.class));
                new ActionHandler<>(UnfollowIndexAction.INSTANCE, UnfollowIndexAction.TransportAction.class),
                // auto-follow actions
                new ActionHandler<>(DeleteAutoFollowPatternAction.INSTANCE, TransportDeleteAutoFollowPatternAction.class),
                new ActionHandler<>(PutAutoFollowPatternAction.INSTANCE, TransportPutAutoFollowPatternAction.class));
    }

    public List<RestHandler> getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings,

@@ -141,10 +161,15 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
            IndexNameExpressionResolver indexNameExpressionResolver,
            Supplier<DiscoveryNodes> nodesInCluster) {
        return Arrays.asList(
                // stats API
                new RestCcrStatsAction(settings, restController),
                // follow APIs
                new RestCreateAndFollowIndexAction(settings, restController),
                new RestFollowIndexAction(settings, restController),
                new RestUnfollowIndexAction(settings, restController));
                new RestUnfollowIndexAction(settings, restController),
                // auto-follow APIs
                new RestDeleteAutoFollowPatternAction(settings, restController),
                new RestPutAutoFollowPatternAction(settings, restController));
    }

    public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ccr;
|
|||
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
@@ -32,6 +33,12 @@ public final class CcrSettings {
    public static final Setting<Boolean> CCR_FOLLOWING_INDEX_SETTING =
        Setting.boolSetting("index.xpack.ccr.following_index", false, Setting.Property.IndexScope);

    /**
     * Setting for controlling the interval between polling leader clusters to check whether there are indices to follow
     */
    public static final Setting<TimeValue> CCR_AUTO_FOLLOW_POLL_INTERVAL =
        Setting.timeSetting("xpack.ccr.auto_follow.poll_interval", TimeValue.timeValueMillis(2500), Property.NodeScope);

    /**
     * The settings defined by CCR.
     *
@@ -40,7 +47,8 @@ public final class CcrSettings {
    static List<Setting<?>> getSettings() {
        return Arrays.asList(
                CCR_ENABLED_SETTING,
                CCR_FOLLOWING_INDEX_SETTING);
                CCR_FOLLOWING_INDEX_SETTING,
                CCR_AUTO_FOLLOW_POLL_INTERVAL);
    }

}
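As a side note for reviewers: `xpack.ccr.auto_follow.poll_interval` above is registered with `Property.NodeScope` only, so it is a static node setting rather than a dynamic cluster setting. A minimal sketch of overriding it in `elasticsearch.yml` (the `5s` value is illustrative and not part of this change; the default is 2500ms):

[source,yaml]
--------------------------------
# Illustrative override: scan leader clusters for new indices every 5s
# instead of the 2.5s default.
xpack.ccr.auto_follow.poll_interval: 5s
--------------------------------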
@@ -0,0 +1,306 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ccr.CcrSettings;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;

/**
 * A component that runs only on the elected master node and follows leader indices automatically
 * if they match with an auto-follow pattern that is defined in {@link AutoFollowMetadata}.
 */
public class AutoFollowCoordinator implements ClusterStateApplier {

    private static final Logger LOGGER = LogManager.getLogger(AutoFollowCoordinator.class);

    private final Client client;
    private final TimeValue pollInterval;
    private final ThreadPool threadPool;
    private final ClusterService clusterService;

    private volatile boolean localNodeMaster = false;

    public AutoFollowCoordinator(Settings settings,
                                 Client client,
                                 ThreadPool threadPool,
                                 ClusterService clusterService) {
        this.client = client;
        this.threadPool = threadPool;
        this.clusterService = clusterService;

        this.pollInterval = CcrSettings.CCR_AUTO_FOLLOW_POLL_INTERVAL.get(settings);
        clusterService.addStateApplier(this);
    }

    private void doAutoFollow() {
        if (localNodeMaster == false) {
            return;
        }
        ClusterState followerClusterState = clusterService.state();
        AutoFollowMetadata autoFollowMetadata = followerClusterState.getMetaData().custom(AutoFollowMetadata.TYPE);
        if (autoFollowMetadata == null) {
            threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow);
            return;
        }

        if (autoFollowMetadata.getPatterns().isEmpty()) {
            threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow);
            return;
        }

        Consumer<Exception> handler = e -> {
            if (e != null) {
                LOGGER.warn("Failure occurred during auto following indices", e);
            }
            threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow);
        };
        AutoFollower operation = new AutoFollower(client, handler, followerClusterState) {

            @Override
            void getLeaderClusterState(Client leaderClient, BiConsumer<ClusterState, Exception> handler) {
                ClusterStateRequest request = new ClusterStateRequest();
                request.clear();
                request.metaData(true);
                leaderClient.admin().cluster().state(request,
                    ActionListener.wrap(
                        r -> handler.accept(r.getState(), null),
                        e -> handler.accept(null, e)
                    )
                );
            }

            @Override
            void createAndFollow(FollowIndexAction.Request followRequest,
                                 Runnable successHandler,
                                 Consumer<Exception> failureHandler) {
                client.execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest),
                    ActionListener.wrap(r -> successHandler.run(), failureHandler));
            }

            @Override
            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
                clusterService.submitStateUpdateTask("update_auto_follow_metadata", new ClusterStateUpdateTask() {

                    @Override
                    public ClusterState execute(ClusterState currentState) throws Exception {
                        return updateFunction.apply(currentState);
                    }

                    @Override
                    public void onFailure(String source, Exception e) {
                        handler.accept(e);
                    }

                    @Override
                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                        handler.accept(null);
                    }
                });
            }

        };
        operation.autoFollowIndices();
    }

    @Override
    public void applyClusterState(ClusterChangedEvent event) {
        final boolean beforeLocalMasterNode = localNodeMaster;
        localNodeMaster = event.localNodeMaster();
        if (beforeLocalMasterNode == false && localNodeMaster) {
            threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow);
        }
    }

    abstract static class AutoFollower {

        private final Client client;
        private final Consumer<Exception> handler;
        private final ClusterState followerClusterState;
        private final AutoFollowMetadata autoFollowMetadata;

        private final CountDown autoFollowPatternsCountDown;
        private final AtomicReference<Exception> autoFollowPatternsErrorHolder = new AtomicReference<>();

        AutoFollower(Client client, Consumer<Exception> handler, ClusterState followerClusterState) {
            this.client = client;
            this.handler = handler;
            this.followerClusterState = followerClusterState;
            this.autoFollowMetadata = followerClusterState.getMetaData().custom(AutoFollowMetadata.TYPE);
            this.autoFollowPatternsCountDown = new CountDown(autoFollowMetadata.getPatterns().size());
        }

        void autoFollowIndices() {
            for (Map.Entry<String, AutoFollowPattern> entry : autoFollowMetadata.getPatterns().entrySet()) {
                String clusterAlias = entry.getKey();
                AutoFollowPattern autoFollowPattern = entry.getValue();
                Client leaderClient = clusterAlias.equals("_local_") ? client : client.getRemoteClusterClient(clusterAlias);
                List<String> followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(clusterAlias);

                getLeaderClusterState(leaderClient, (leaderClusterState, e) -> {
                    if (leaderClusterState != null) {
                        assert e == null;
                        handleClusterAlias(clusterAlias, autoFollowPattern, followedIndices, leaderClusterState);
                    } else {
                        finalise(e);
                    }
                });
            }
        }

        private void handleClusterAlias(String clusterAlias, AutoFollowPattern autoFollowPattern,
                                        List<String> followedIndexUUIDs, ClusterState leaderClusterState) {
            final List<Index> leaderIndicesToFollow =
                getLeaderIndicesToFollow(autoFollowPattern, leaderClusterState, followerClusterState, followedIndexUUIDs);
            if (leaderIndicesToFollow.isEmpty()) {
                finalise(null);
            } else {
                final CountDown leaderIndicesCountDown = new CountDown(leaderIndicesToFollow.size());
                final AtomicReference<Exception> leaderIndicesErrorHolder = new AtomicReference<>();
                for (Index indexToFollow : leaderIndicesToFollow) {
                    final String leaderIndexName = indexToFollow.getName();
                    final String followIndexName = getFollowerIndexName(autoFollowPattern, leaderIndexName);

                    String leaderIndexNameWithClusterAliasPrefix = clusterAlias.equals("_local_") ? leaderIndexName :
                        clusterAlias + ":" + leaderIndexName;
                    FollowIndexAction.Request followRequest =
                        new FollowIndexAction.Request(leaderIndexNameWithClusterAliasPrefix, followIndexName,
                            autoFollowPattern.getMaxBatchOperationCount(), autoFollowPattern.getMaxConcurrentReadBatches(),
                            autoFollowPattern.getMaxOperationSizeInBytes(), autoFollowPattern.getMaxConcurrentWriteBatches(),
                            autoFollowPattern.getMaxWriteBufferSize(), autoFollowPattern.getRetryTimeout(),
                            autoFollowPattern.getIdleShardRetryDelay());

                    // Executed if the create and follow api call succeeds:
                    Runnable successHandler = () -> {
                        LOGGER.info("Auto followed leader index [{}] as follow index [{}]", leaderIndexName, followIndexName);

                        // This function updates the auto follow metadata in the cluster to record that the leader index has been followed
                        // (so that we do not try to follow it in subsequent auto follow runs):
                        Function<ClusterState, ClusterState> function = recordLeaderIndexAsFollowFunction(clusterAlias, indexToFollow);
                        // The coordinator always runs on the elected master node, so we can update cluster state here:
                        updateAutoFollowMetadata(function, updateError -> {
                            if (updateError != null) {
                                LOGGER.error("Failed to mark leader index [" + leaderIndexName + "] as auto followed", updateError);
                                if (leaderIndicesErrorHolder.compareAndSet(null, updateError) == false) {
                                    leaderIndicesErrorHolder.get().addSuppressed(updateError);
                                }
                            } else {
                                LOGGER.debug("Successfully marked leader index [{}] as auto followed", leaderIndexName);
                            }
                            if (leaderIndicesCountDown.countDown()) {
                                finalise(leaderIndicesErrorHolder.get());
                            }
                        });
                    };
                    // Executed if the create and follow api call fails:
                    Consumer<Exception> failureHandler = followError -> {
                        assert followError != null;
                        LOGGER.warn("Failed to auto follow leader index [" + leaderIndexName + "]", followError);
                        if (leaderIndicesCountDown.countDown()) {
                            finalise(followError);
                        }
                    };
                    createAndFollow(followRequest, successHandler, failureHandler);
                }
            }
        }

        private void finalise(Exception failure) {
            if (autoFollowPatternsErrorHolder.compareAndSet(null, failure) == false) {
                autoFollowPatternsErrorHolder.get().addSuppressed(failure);
            }

            if (autoFollowPatternsCountDown.countDown()) {
                handler.accept(autoFollowPatternsErrorHolder.get());
            }
        }

        static List<Index> getLeaderIndicesToFollow(AutoFollowPattern autoFollowPattern,
                                                    ClusterState leaderClusterState,
                                                    ClusterState followerClusterState,
                                                    List<String> followedIndexUUIDs) {
            List<Index> leaderIndicesToFollow = new ArrayList<>();
            for (IndexMetaData leaderIndexMetaData : leaderClusterState.getMetaData()) {
                if (autoFollowPattern.match(leaderIndexMetaData.getIndex().getName())) {
                    if (followedIndexUUIDs.contains(leaderIndexMetaData.getIndex().getUUID()) == false) {
                        // TODO: iterate over the indices in the followerClusterState and check whether an IndexMetaData
                        // has a leader index uuid custom metadata entry that matches with the uuid of the leaderIndexMetaData variable.
                        // If so then handle it differently: do not follow it, but just add an entry to
                        // AutoFollowMetadata#followedLeaderIndexUUIDs
                        leaderIndicesToFollow.add(leaderIndexMetaData.getIndex());
                    }
                }
            }
            return leaderIndicesToFollow;
        }

        static String getFollowerIndexName(AutoFollowPattern autoFollowPattern, String leaderIndexName) {
            if (autoFollowPattern.getFollowIndexPattern() != null) {
                return autoFollowPattern.getFollowIndexPattern().replace("{{leader_index}}", leaderIndexName);
            } else {
                return leaderIndexName;
            }
        }

        static Function<ClusterState, ClusterState> recordLeaderIndexAsFollowFunction(String clusterAlias, Index indexToFollow) {
            return currentState -> {
                AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);

                Map<String, List<String>> newFollowedIndexUUIDS =
                    new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
                newFollowedIndexUUIDS.get(clusterAlias).add(indexToFollow.getUUID());

                ClusterState.Builder newState = ClusterState.builder(currentState);
                AutoFollowMetadata newAutoFollowMetadata =
                    new AutoFollowMetadata(currentAutoFollowMetadata.getPatterns(), newFollowedIndexUUIDS);
                newState.metaData(MetaData.builder(currentState.getMetaData())
                    .putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata)
                    .build());
                return newState.build();
            };
        }

        // abstract methods to make unit testing possible:

        abstract void getLeaderClusterState(Client leaderClient,
                                            BiConsumer<ClusterState, Exception> handler);

        abstract void createAndFollow(FollowIndexAction.Request followRequest,
                                      Runnable successHandler,
                                      Consumer<Exception> failureHandler);

        abstract void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction,
                                               Consumer<Exception> handler);

    }
}
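To make the naming rule in `AutoFollower#getFollowerIndexName` above concrete, here is a minimal standalone sketch; the pattern and index name are invented for illustration and are not part of this change:

[source,java]
--------------------------------
public class FollowerNameExample {
    public static void main(String[] args) {
        // Mirrors getFollowerIndexName: substitute the leader index name into the
        // configured follow pattern; with no pattern set, the leader name is reused as-is.
        String followIndexPattern = "{{leader_index}}-copy"; // hypothetical follow_index_name_pattern
        String leaderIndexName = "logs-20190101";
        System.out.println(followIndexPattern.replace("{{leader_index}}", leaderIndexName));
        // prints: logs-20190101-copy
    }
}
--------------------------------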
@@ -0,0 +1,81 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

public class DeleteAutoFollowPatternAction extends Action<AcknowledgedResponse> {

    public static final String NAME = "cluster:admin/xpack/ccr/auto_follow_pattern/delete";
    public static final DeleteAutoFollowPatternAction INSTANCE = new DeleteAutoFollowPatternAction();

    private DeleteAutoFollowPatternAction() {
        super(NAME);
    }

    @Override
    public AcknowledgedResponse newResponse() {
        return new AcknowledgedResponse();
    }

    public static class Request extends AcknowledgedRequest<Request> {

        private String leaderClusterAlias;

        @Override
        public ActionRequestValidationException validate() {
            ActionRequestValidationException validationException = null;
            if (leaderClusterAlias == null) {
                validationException = addValidationError("leaderClusterAlias is missing", validationException);
            }
            return validationException;
        }

        public String getLeaderClusterAlias() {
            return leaderClusterAlias;
        }

        public void setLeaderClusterAlias(String leaderClusterAlias) {
            this.leaderClusterAlias = leaderClusterAlias;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            leaderClusterAlias = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(leaderClusterAlias);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Request request = (Request) o;
            return Objects.equals(leaderClusterAlias, request.leaderClusterAlias);
        }

        @Override
        public int hashCode() {
            return Objects.hash(leaderClusterAlias);
        }
    }

}
@@ -0,0 +1,284 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;

import java.io.IOException;
import java.util.List;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {

    public static final String NAME = "cluster:admin/xpack/ccr/auto_follow_pattern/put";
    public static final PutAutoFollowPatternAction INSTANCE = new PutAutoFollowPatternAction();

    private PutAutoFollowPatternAction() {
        super(NAME);
    }

    @Override
    public AcknowledgedResponse newResponse() {
        return new AcknowledgedResponse();
    }

    public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {

        static final ParseField LEADER_CLUSTER_ALIAS_FIELD = new ParseField("leader_cluster_alias");
        static final ParseField LEADER_INDEX_PATTERNS_FIELD = new ParseField("leader_index_patterns");
        static final ParseField FOLLOW_INDEX_NAME_PATTERN_FIELD = new ParseField("follow_index_name_pattern");

        private static final ObjectParser<Request, String> PARSER = new ObjectParser<>("put_auto_follow_pattern_request", Request::new);

        static {
            PARSER.declareString(Request::setLeaderClusterAlias, LEADER_CLUSTER_ALIAS_FIELD);
            PARSER.declareStringArray(Request::setLeaderIndexPatterns, LEADER_INDEX_PATTERNS_FIELD);
            PARSER.declareString(Request::setFollowIndexNamePattern, FOLLOW_INDEX_NAME_PATTERN_FIELD);
            PARSER.declareInt(Request::setMaxBatchOperationCount, AutoFollowPattern.MAX_BATCH_OPERATION_COUNT);
            PARSER.declareInt(Request::setMaxConcurrentReadBatches, AutoFollowPattern.MAX_CONCURRENT_READ_BATCHES);
            PARSER.declareLong(Request::setMaxOperationSizeInBytes, AutoFollowPattern.MAX_BATCH_SIZE_IN_BYTES);
            PARSER.declareInt(Request::setMaxConcurrentWriteBatches, AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES);
            PARSER.declareInt(Request::setMaxWriteBufferSize, AutoFollowPattern.MAX_WRITE_BUFFER_SIZE);
            PARSER.declareField(Request::setRetryTimeout,
                (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.RETRY_TIMEOUT.getPreferredName()),
                ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING);
            PARSER.declareField(Request::setIdleShardRetryDelay,
                (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName()),
                ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING);
        }

        public static Request fromXContent(XContentParser parser, String remoteClusterAlias) throws IOException {
            Request request = PARSER.parse(parser, null);
            if (remoteClusterAlias != null) {
                if (request.leaderClusterAlias == null) {
                    request.leaderClusterAlias = remoteClusterAlias;
                } else {
                    if (request.leaderClusterAlias.equals(remoteClusterAlias) == false) {
                        throw new IllegalArgumentException("provided leaderClusterAlias is not equal to the alias in the request body");
                    }
                }
            }
            return request;
        }

        private String leaderClusterAlias;
        private List<String> leaderIndexPatterns;
        private String followIndexNamePattern;

        private Integer maxBatchOperationCount;
        private Integer maxConcurrentReadBatches;
        private Long maxOperationSizeInBytes;
        private Integer maxConcurrentWriteBatches;
        private Integer maxWriteBufferSize;
        private TimeValue retryTimeout;
        private TimeValue idleShardRetryDelay;

        @Override
        public ActionRequestValidationException validate() {
            ActionRequestValidationException validationException = null;
            if (leaderClusterAlias == null) {
                validationException = addValidationError("leaderClusterAlias is missing", validationException);
            }
            if (leaderIndexPatterns == null || leaderIndexPatterns.isEmpty()) {
                validationException = addValidationError("leaderIndexPatterns is missing", validationException);
            }
            return validationException;
        }

        public String getLeaderClusterAlias() {
            return leaderClusterAlias;
        }

        public void setLeaderClusterAlias(String leaderClusterAlias) {
            this.leaderClusterAlias = leaderClusterAlias;
        }

        public List<String> getLeaderIndexPatterns() {
            return leaderIndexPatterns;
        }

        public void setLeaderIndexPatterns(List<String> leaderIndexPatterns) {
            this.leaderIndexPatterns = leaderIndexPatterns;
        }

        public String getFollowIndexNamePattern() {
            return followIndexNamePattern;
        }

        public void setFollowIndexNamePattern(String followIndexNamePattern) {
            this.followIndexNamePattern = followIndexNamePattern;
        }

        public Integer getMaxBatchOperationCount() {
            return maxBatchOperationCount;
        }

        public void setMaxBatchOperationCount(Integer maxBatchOperationCount) {
            this.maxBatchOperationCount = maxBatchOperationCount;
        }

        public Integer getMaxConcurrentReadBatches() {
            return maxConcurrentReadBatches;
        }

        public void setMaxConcurrentReadBatches(Integer maxConcurrentReadBatches) {
            this.maxConcurrentReadBatches = maxConcurrentReadBatches;
        }

        public Long getMaxOperationSizeInBytes() {
            return maxOperationSizeInBytes;
        }

        public void setMaxOperationSizeInBytes(Long maxOperationSizeInBytes) {
            this.maxOperationSizeInBytes = maxOperationSizeInBytes;
        }

        public Integer getMaxConcurrentWriteBatches() {
            return maxConcurrentWriteBatches;
        }

        public void setMaxConcurrentWriteBatches(Integer maxConcurrentWriteBatches) {
            this.maxConcurrentWriteBatches = maxConcurrentWriteBatches;
        }

        public Integer getMaxWriteBufferSize() {
            return maxWriteBufferSize;
        }

        public void setMaxWriteBufferSize(Integer maxWriteBufferSize) {
            this.maxWriteBufferSize = maxWriteBufferSize;
        }

        public TimeValue getRetryTimeout() {
            return retryTimeout;
        }

        public void setRetryTimeout(TimeValue retryTimeout) {
            this.retryTimeout = retryTimeout;
        }

        public TimeValue getIdleShardRetryDelay() {
            return idleShardRetryDelay;
        }

        public void setIdleShardRetryDelay(TimeValue idleShardRetryDelay) {
            this.idleShardRetryDelay = idleShardRetryDelay;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            leaderClusterAlias = in.readString();
            leaderIndexPatterns = in.readList(StreamInput::readString);
            followIndexNamePattern = in.readOptionalString();
            maxBatchOperationCount = in.readOptionalVInt();
            maxConcurrentReadBatches = in.readOptionalVInt();
            maxOperationSizeInBytes = in.readOptionalLong();
            maxConcurrentWriteBatches = in.readOptionalVInt();
            maxWriteBufferSize = in.readOptionalVInt();
            retryTimeout = in.readOptionalTimeValue();
            idleShardRetryDelay = in.readOptionalTimeValue();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(leaderClusterAlias);
            out.writeStringList(leaderIndexPatterns);
            out.writeOptionalString(followIndexNamePattern);
            out.writeOptionalVInt(maxBatchOperationCount);
            out.writeOptionalVInt(maxConcurrentReadBatches);
            out.writeOptionalLong(maxOperationSizeInBytes);
            out.writeOptionalVInt(maxConcurrentWriteBatches);
            out.writeOptionalVInt(maxWriteBufferSize);
            out.writeOptionalTimeValue(retryTimeout);
            out.writeOptionalTimeValue(idleShardRetryDelay);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            {
                builder.field(LEADER_CLUSTER_ALIAS_FIELD.getPreferredName(), leaderClusterAlias);
                builder.field(LEADER_INDEX_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns);
                if (followIndexNamePattern != null) {
                    builder.field(FOLLOW_INDEX_NAME_PATTERN_FIELD.getPreferredName(), followIndexNamePattern);
                }
                if (maxBatchOperationCount != null) {
                    builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount);
                }
                if (maxOperationSizeInBytes != null) {
                    builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes);
                }
                if (maxWriteBufferSize != null) {
                    builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize);
                }
                if (maxConcurrentReadBatches != null) {
                    builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches);
                }
                if (maxConcurrentWriteBatches != null) {
                    builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches);
                }
                if (retryTimeout != null) {
                    builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep());
                }
                if (idleShardRetryDelay != null) {
                    builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep());
                }
            }
            builder.endObject();
            return builder;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Request request = (Request) o;
            return Objects.equals(leaderClusterAlias, request.leaderClusterAlias) &&
                Objects.equals(leaderIndexPatterns, request.leaderIndexPatterns) &&
                Objects.equals(followIndexNamePattern, request.followIndexNamePattern) &&
                Objects.equals(maxBatchOperationCount, request.maxBatchOperationCount) &&
                Objects.equals(maxConcurrentReadBatches, request.maxConcurrentReadBatches) &&
                Objects.equals(maxOperationSizeInBytes, request.maxOperationSizeInBytes) &&
                Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) &&
                Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) &&
                Objects.equals(retryTimeout, request.retryTimeout) &&
                Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay);
        }

        @Override
        public int hashCode() {
            return Objects.hash(
                leaderClusterAlias,
                leaderIndexPatterns,
                followIndexNamePattern,
                maxBatchOperationCount,
                maxConcurrentReadBatches,
                maxOperationSizeInBytes,
                maxConcurrentWriteBatches,
                maxWriteBufferSize,
                retryTimeout,
                idleShardRetryDelay
            );
        }
    }

}
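For orientation, a sketch of a request body that `Request.fromXContent` above would accept. Only the field names declared in this file are used; the numeric and timeout fields are parsed via constants in `AutoFollowPattern`/`ShardFollowTask` that are not shown in this diff, so they are omitted rather than guessed:

[source,js]
--------------------------------
{
  "leader_cluster_alias": "leader_cluster",
  "leader_index_patterns": ["logs-*", "metrics-*"],
  "follow_index_name_pattern": "{{leader_index}}-copy"
}
--------------------------------

The `leader_cluster` alias is made up. As `fromXContent` shows, the alias may instead be supplied out of band (the REST layer below passes the URL path parameter); if both are given they must match, otherwise an `IllegalArgumentException` is thrown.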
@@ -0,0 +1,102 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TransportDeleteAutoFollowPatternAction extends
    TransportMasterNodeAction<DeleteAutoFollowPatternAction.Request, AcknowledgedResponse> {

    @Inject
    public TransportDeleteAutoFollowPatternAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                  ThreadPool threadPool, ActionFilters actionFilters,
                                                  IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, DeleteAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters,
            indexNameExpressionResolver, DeleteAutoFollowPatternAction.Request::new);
    }

    @Override
    protected String executor() {
        return ThreadPool.Names.SAME;
    }

    @Override
    protected AcknowledgedResponse newResponse() {
        return new AcknowledgedResponse();
    }

    @Override
    protected void masterOperation(DeleteAutoFollowPatternAction.Request request,
                                   ClusterState state,
                                   ActionListener<AcknowledgedResponse> listener) throws Exception {
        clusterService.submitStateUpdateTask("delete-auto-follow-pattern-" + request.getLeaderClusterAlias(),
            new AckedClusterStateUpdateTask<AcknowledgedResponse>(request, listener) {

                @Override
                protected AcknowledgedResponse newResponse(boolean acknowledged) {
                    return new AcknowledgedResponse(acknowledged);
                }

                @Override
                public ClusterState execute(ClusterState currentState) throws Exception {
                    return innerDelete(request, currentState);
                }
            });
    }

    static ClusterState innerDelete(DeleteAutoFollowPatternAction.Request request, ClusterState currentState) {
        AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);
        if (currentAutoFollowMetadata == null) {
            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found",
                request.getLeaderClusterAlias());
        }
        Map<String, AutoFollowPattern> patterns = currentAutoFollowMetadata.getPatterns();
        AutoFollowPattern autoFollowPatternToRemove = patterns.get(request.getLeaderClusterAlias());
        if (autoFollowPatternToRemove == null) {
            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found",
                request.getLeaderClusterAlias());
        }

        final Map<String, AutoFollowPattern> patternsCopy = new HashMap<>(patterns);
        final Map<String, List<String>> followedLeaderIndexUUIDSCopy =
            new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
        patternsCopy.remove(request.getLeaderClusterAlias());
        followedLeaderIndexUUIDSCopy.remove(request.getLeaderClusterAlias());

        AutoFollowMetadata newAutoFollowMetadata = new AutoFollowMetadata(patternsCopy, followedLeaderIndexUUIDSCopy);
        ClusterState.Builder newState = ClusterState.builder(currentState);
        newState.metaData(MetaData.builder(currentState.getMetaData())
            .putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata)
            .build());
        return newState.build();
    }

    @Override
    protected ClusterBlockException checkBlock(DeleteAutoFollowPatternAction.Request request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }
}
@@ -0,0 +1,173 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class TransportPutAutoFollowPatternAction extends
    TransportMasterNodeAction<PutAutoFollowPatternAction.Request, AcknowledgedResponse> {

    private final Client client;

    @Inject
    public TransportPutAutoFollowPatternAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                               ThreadPool threadPool, ActionFilters actionFilters, Client client,
                                               IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, PutAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters,
            indexNameExpressionResolver, PutAutoFollowPatternAction.Request::new);
        this.client = client;
    }

    @Override
    protected String executor() {
        return ThreadPool.Names.SAME;
    }

    @Override
    protected AcknowledgedResponse newResponse() {
        return new AcknowledgedResponse();
    }

    @Override
    protected void masterOperation(PutAutoFollowPatternAction.Request request,
                                   ClusterState state,
                                   ActionListener<AcknowledgedResponse> listener) throws Exception {
        final Client leaderClient;
        if (request.getLeaderClusterAlias().equals("_local_")) {
            leaderClient = client;
        } else {
            leaderClient = client.getRemoteClusterClient(request.getLeaderClusterAlias());
        }

        final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
        clusterStateRequest.clear();
        clusterStateRequest.metaData(true);

        leaderClient.admin().cluster().state(clusterStateRequest, ActionListener.wrap(clusterStateResponse -> {
            final ClusterState leaderClusterState = clusterStateResponse.getState();
            clusterService.submitStateUpdateTask("put-auto-follow-pattern-" + request.getLeaderClusterAlias(),
                new AckedClusterStateUpdateTask<AcknowledgedResponse>(request, listener) {

                    @Override
                    protected AcknowledgedResponse newResponse(boolean acknowledged) {
                        return new AcknowledgedResponse(acknowledged);
                    }

                    @Override
                    public ClusterState execute(ClusterState currentState) throws Exception {
                        return innerPut(request, currentState, leaderClusterState);
                    }
                });
        }, listener::onFailure));
    }

    static ClusterState innerPut(PutAutoFollowPatternAction.Request request,
                                 ClusterState localState,
                                 ClusterState leaderClusterState) {
        // auto patterns are always overwritten
        // only already followed index uuids are updated

        AutoFollowMetadata currentAutoFollowMetadata = localState.metaData().custom(AutoFollowMetadata.TYPE);
        Map<String, List<String>> followedLeaderIndices;
        Map<String, AutoFollowPattern> patterns;
        if (currentAutoFollowMetadata != null) {
            patterns = new HashMap<>(currentAutoFollowMetadata.getPatterns());
            followedLeaderIndices = new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
        } else {
            patterns = new HashMap<>();
            followedLeaderIndices = new HashMap<>();
        }

        AutoFollowPattern previousPattern = patterns.get(request.getLeaderClusterAlias());
        List<String> followedIndexUUIDs = followedLeaderIndices.get(request.getLeaderClusterAlias());
        if (followedIndexUUIDs == null) {
            followedIndexUUIDs = new ArrayList<>();
            followedLeaderIndices.put(request.getLeaderClusterAlias(), followedIndexUUIDs);
        }

        // Mark existing leader indices as already auto followed:
        if (previousPattern != null) {
            markExistingIndicesAsAutoFollowedForNewPatterns(request.getLeaderIndexPatterns(), leaderClusterState.metaData(),
                previousPattern, followedIndexUUIDs);
        } else {
            markExistingIndicesAsAutoFollowed(request.getLeaderIndexPatterns(), leaderClusterState.metaData(),
                followedIndexUUIDs);
        }

        AutoFollowPattern autoFollowPattern = new AutoFollowPattern(
            request.getLeaderIndexPatterns(),
            request.getFollowIndexNamePattern(),
            request.getMaxBatchOperationCount(),
            request.getMaxConcurrentReadBatches(),
            request.getMaxOperationSizeInBytes(),
            request.getMaxConcurrentWriteBatches(),
            request.getMaxWriteBufferSize(),
            request.getRetryTimeout(),
            request.getIdleShardRetryDelay()
        );
        patterns.put(request.getLeaderClusterAlias(), autoFollowPattern);
        ClusterState.Builder newState = ClusterState.builder(localState);
        newState.metaData(MetaData.builder(localState.getMetaData())
            .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, followedLeaderIndices))
            .build());
        return newState.build();
    }

    private static void markExistingIndicesAsAutoFollowedForNewPatterns(
        List<String> leaderIndexPatterns,
        MetaData leaderMetaData,
        AutoFollowPattern previousPattern,
        List<String> followedIndexUUIDS) {

        final List<String> newPatterns = leaderIndexPatterns
            .stream()
            .filter(p -> previousPattern.getLeaderIndexPatterns().contains(p) == false)
            .collect(Collectors.toList());
        markExistingIndicesAsAutoFollowed(newPatterns, leaderMetaData, followedIndexUUIDS);
    }

    private static void markExistingIndicesAsAutoFollowed(
        List<String> patterns,
        MetaData leaderMetaData,
        List<String> followedIndexUUIDS) {

        for (final IndexMetaData indexMetaData : leaderMetaData) {
            if (AutoFollowPattern.match(patterns, indexMetaData.getIndex().getName())) {
                followedIndexUUIDS.add(indexMetaData.getIndexUUID());
            }
        }
    }

    @Override
    protected ClusterBlockException checkBlock(PutAutoFollowPatternAction.Request request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }
}
@@ -0,0 +1,39 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.rest;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.Request;

import java.io.IOException;

import static org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.INSTANCE;

public class RestDeleteAutoFollowPatternAction extends BaseRestHandler {

    public RestDeleteAutoFollowPatternAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.DELETE, "/_ccr/_auto_follow/{leader_cluster_alias}", this);
    }

    @Override
    public String getName() {
        return "ccr_delete_auto_follow_pattern_action";
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        Request request = new Request();
        request.setLeaderClusterAlias(restRequest.param("leader_cluster_alias"));
        return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel));
    }

}
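Given the route registered above, removing an auto-follow pattern is a bare DELETE with the cluster alias as the path parameter (the `leader_cluster` alias is illustrative):

[source,js]
--------------------------------
DELETE /_ccr/_auto_follow/leader_cluster
--------------------------------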
@@ -0,0 +1,44 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.rest;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction.Request;

import java.io.IOException;

import static org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction.INSTANCE;

public class RestPutAutoFollowPatternAction extends BaseRestHandler {

    public RestPutAutoFollowPatternAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.PUT, "/_ccr/_auto_follow/{leader_cluster_alias}", this);
    }

    @Override
    public String getName() {
        return "ccr_put_auto_follow_pattern_action";
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        Request request = createRequest(restRequest);
        return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel));
    }

    static Request createRequest(RestRequest restRequest) throws IOException {
        try (XContentParser parser = restRequest.contentOrSourceParamParser()) {
            return Request.fromXContent(parser, restRequest.param("leader_cluster_alias"));
        }
    }
}
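Putting the route and the request parser together, a hedged end-to-end example of registering a pattern. The alias and patterns are invented; the alias would normally name a remote cluster configured on the follower, or `_local_` for the local cluster:

[source,js]
--------------------------------
PUT /_ccr/_auto_follow/leader_cluster
{
  "leader_index_patterns": ["logs-*"],
  "follow_index_name_pattern": "{{leader_index}}-copy"
}
--------------------------------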
@@ -0,0 +1,296 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.Version;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class AutoFollowCoordinatorTests extends ESTestCase {

    public void testAutoFollower() {
        Client client = mock(Client.class);
        when(client.getRemoteClusterClient(anyString())).thenReturn(client);

        ClusterState leaderState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101")
                .settings(settings(Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0)))
            .build();

        AutoFollowPattern autoFollowPattern =
            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
        Map<String, AutoFollowPattern> patterns = new HashMap<>();
        patterns.put("remote", autoFollowPattern);
        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);

        ClusterState currentState = ClusterState.builder(new ClusterName("name"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
            .build();

        boolean[] invoked = new boolean[]{false};
        Consumer<Exception> handler = e -> {
            invoked[0] = true;
            assertThat(e, nullValue());
        };
        AutoFollower autoFollower = new AutoFollower(client, handler, currentState) {
            @Override
            void getLeaderClusterState(Client leaderClient, BiConsumer<ClusterState, Exception> handler) {
                handler.accept(leaderState, null);
            }

            @Override
            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
                assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101"));
                assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101"));
                successHandler.run();
            }

            @Override
            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
                ClusterState resultCs = updateFunction.apply(currentState);
                AutoFollowMetadata result = resultCs.metaData().custom(AutoFollowMetadata.TYPE);
                assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
                assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1));
                handler.accept(null);
            }
        };
        autoFollower.autoFollowIndices();
        assertThat(invoked[0], is(true));
    }

    public void testAutoFollowerClusterStateApiFailure() {
        Client client = mock(Client.class);
        when(client.getRemoteClusterClient(anyString())).thenReturn(client);

        AutoFollowPattern autoFollowPattern =
            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
        Map<String, AutoFollowPattern> patterns = new HashMap<>();
        patterns.put("remote", autoFollowPattern);
        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);
        ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
            .build();

        Exception failure = new RuntimeException("failure");
        boolean[] invoked = new boolean[]{false};
        Consumer<Exception> handler = e -> {
            invoked[0] = true;
            assertThat(e, sameInstance(failure));
        };
        AutoFollower autoFollower = new AutoFollower(client, handler, followerState) {
            @Override
            void getLeaderClusterState(Client leaderClient, BiConsumer<ClusterState, Exception> handler) {
                handler.accept(null, failure);
            }

            @Override
            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
                fail("should not get here");
            }

            @Override
            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
                fail("should not get here");
            }
        };
        autoFollower.autoFollowIndices();
        assertThat(invoked[0], is(true));
    }

    public void testAutoFollowerUpdateClusterStateFailure() {
        Client client = mock(Client.class);
        when(client.getRemoteClusterClient(anyString())).thenReturn(client);

        ClusterState leaderState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101")
                .settings(settings(Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0)))
            .build();

        AutoFollowPattern autoFollowPattern =
            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
        Map<String, AutoFollowPattern> patterns = new HashMap<>();
        patterns.put("remote", autoFollowPattern);
        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);
        ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
            .build();

        Exception failure = new RuntimeException("failure");
        boolean[] invoked = new boolean[]{false};
        Consumer<Exception> handler = e -> {
            invoked[0] = true;
            assertThat(e, sameInstance(failure));
        };
        AutoFollower autoFollower = new AutoFollower(client, handler, followerState) {
            @Override
            void getLeaderClusterState(Client leaderClient, BiConsumer<ClusterState, Exception> handler) {
                handler.accept(leaderState, null);
            }

            @Override
            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
                assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101"));
                assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101"));
                successHandler.run();
            }

            @Override
            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
                handler.accept(failure);
            }
        };
        autoFollower.autoFollowIndices();
        assertThat(invoked[0], is(true));
    }

    public void testAutoFollowerCreateAndFollowApiCallFailure() {
        Client client = mock(Client.class);
        when(client.getRemoteClusterClient(anyString())).thenReturn(client);

        ClusterState leaderState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().put(IndexMetaData.builder("logs-20190101")
                .settings(settings(Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0)))
            .build();

        AutoFollowPattern autoFollowPattern =
            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
        Map<String, AutoFollowPattern> patterns = new HashMap<>();
        patterns.put("remote", autoFollowPattern);
        Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();
        followedLeaderIndexUUIDS.put("remote", new ArrayList<>());
        AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS);
        ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
            .build();

        Exception failure = new RuntimeException("failure");
        boolean[] invoked = new boolean[]{false};
        Consumer<Exception> handler = e -> {
            invoked[0] = true;
            assertThat(e, sameInstance(failure));
        };
        AutoFollower autoFollower = new AutoFollower(client, handler, followerState) {
            @Override
            void getLeaderClusterState(Client leaderClient, BiConsumer<ClusterState, Exception> handler) {
                handler.accept(leaderState, null);
            }

            @Override
            void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer<Exception> failureHandler) {
                assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101"));
                assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101"));
                failureHandler.accept(failure);
            }

            @Override
            void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
                fail("should not get here");
            }
        };
        autoFollower.autoFollowIndices();
        assertThat(invoked[0], is(true));
    }

    public void testGetLeaderIndicesToFollow() {
        AutoFollowPattern autoFollowPattern =
            new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null);
        ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,
                new AutoFollowMetadata(Collections.singletonMap("remote", autoFollowPattern), Collections.emptyMap())))
            .build();

        MetaData.Builder imdBuilder = MetaData.builder();
        for (int i = 0; i < 5; i++) {
            Settings.Builder builder = Settings.builder()
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(IndexMetaData.SETTING_INDEX_UUID, "metrics-" + i);
            imdBuilder.put(IndexMetaData.builder("metrics-" + i)
                .settings(builder)
                .numberOfShards(1)
                .numberOfReplicas(0));
        }
        imdBuilder.put(IndexMetaData.builder("logs-0")
            .settings(settings(Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(0));

        ClusterState leaderState = ClusterState.builder(new ClusterName("remote"))
            .metaData(imdBuilder)
            .build();

        List<Index> result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, leaderState, followerState, Collections.emptyList());
        result.sort(Comparator.comparing(Index::getName));
        assertThat(result.size(), equalTo(5));
        assertThat(result.get(0).getName(), equalTo("metrics-0"));
        assertThat(result.get(1).getName(), equalTo("metrics-1"));
        assertThat(result.get(2).getName(), equalTo("metrics-2"));
        assertThat(result.get(3).getName(), equalTo("metrics-3"));
        assertThat(result.get(4).getName(), equalTo("metrics-4"));

        List<String> followedIndexUUIDs = Collections.singletonList(leaderState.metaData().index("metrics-2").getIndexUUID());
        result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, leaderState, followerState, followedIndexUUIDs);
        result.sort(Comparator.comparing(Index::getName));
        assertThat(result.size(), equalTo(4));
        assertThat(result.get(0).getName(), equalTo("metrics-0"));
        assertThat(result.get(1).getName(), equalTo("metrics-1"));
        assertThat(result.get(2).getName(), equalTo("metrics-3"));
        assertThat(result.get(3).getName(), equalTo("metrics-4"));
    }

    public void testGetFollowerIndexName() {
        AutoFollowPattern autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null,
            null, null, null, null, null, null);
        assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0"));

        autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-metrics-0", null, null,
|
||||
null, null, null, null, null);
|
||||
assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0"));
|
||||
|
||||
autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-{{leader_index}}", null,
|
||||
null, null, null, null, null, null);
|
||||
assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0"));
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,189 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.ccr.action;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.xpack.ccr.LocalStateCcr;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
|
||||
public class AutoFollowTests extends ESSingleNodeTestCase {
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> getPlugins() {
|
||||
return Collections.singleton(LocalStateCcr.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean resetNodeAfterTest() {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void testAutoFollow() throws Exception {
|
||||
Settings leaderIndexSettings = Settings.builder()
|
||||
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
|
||||
.build();
|
||||
|
||||
createIndex("logs-201812", leaderIndexSettings, "_doc");
|
||||
|
||||
// Enabling auto following:
|
||||
putAutoFollowPatterns("logs-*", "transactions-*");
|
||||
|
||||
createIndex("metrics-201901", leaderIndexSettings, "_doc");
|
||||
|
||||
createIndex("logs-201901", leaderIndexSettings, "_doc");
|
||||
assertBusy(() -> {
|
||||
IndicesExistsRequest request = new IndicesExistsRequest("copy-logs-201901");
|
||||
assertTrue(client().admin().indices().exists(request).actionGet().isExists());
|
||||
});
|
||||
createIndex("transactions-201901", leaderIndexSettings, "_doc");
|
||||
assertBusy(() -> {
|
||||
IndicesExistsRequest request = new IndicesExistsRequest("copy-transactions-201901");
|
||||
assertTrue(client().admin().indices().exists(request).actionGet().isExists());
|
||||
});
|
||||
|
||||
IndicesExistsRequest request = new IndicesExistsRequest("copy-metrics-201901");
|
||||
assertFalse(client().admin().indices().exists(request).actionGet().isExists());
|
||||
request = new IndicesExistsRequest("copy-logs-201812");
|
||||
assertFalse(client().admin().indices().exists(request).actionGet().isExists());
|
||||
}
|
||||
|
||||
public void testAutoFollowManyIndices() throws Exception {
|
||||
Settings leaderIndexSettings = Settings.builder()
|
||||
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
|
||||
.build();
|
||||
|
||||
putAutoFollowPatterns("logs-*");
|
||||
int numIndices = randomIntBetween(4, 32);
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
createIndex("logs-" + i, leaderIndexSettings, "_doc");
|
||||
}
|
||||
int expectedVal1 = numIndices;
|
||||
assertBusy(() -> {
|
||||
MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData();
|
||||
int count = (int) Arrays.stream(metaData.getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count();
|
||||
assertThat(count, equalTo(expectedVal1));
|
||||
});
|
||||
|
||||
deleteAutoFollowPatternSetting();
|
||||
createIndex("logs-does-not-count", leaderIndexSettings, "_doc");
|
||||
|
||||
putAutoFollowPatterns("logs-*");
|
||||
int i = numIndices;
|
||||
numIndices = numIndices + randomIntBetween(4, 32);
|
||||
for (; i < numIndices; i++) {
|
||||
createIndex("logs-" + i, leaderIndexSettings, "_doc");
|
||||
}
|
||||
int expectedVal2 = numIndices;
|
||||
assertBusy(() -> {
|
||||
MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData();
|
||||
int count = (int) Arrays.stream(metaData.getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count();
|
||||
assertThat(count, equalTo(expectedVal2));
|
||||
});
|
||||
}
|
||||
|
||||
public void testAutoFollowParameterAreDelegated() throws Exception {
|
||||
Settings leaderIndexSettings = Settings.builder()
|
||||
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
|
||||
.build();
|
||||
|
||||
// Enabling auto following:
|
||||
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
|
||||
request.setLeaderClusterAlias("_local_");
|
||||
request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));
|
||||
// Need to set this, because following an index in the same cluster
|
||||
request.setFollowIndexNamePattern("copy-{{leader_index}}");
|
||||
if (randomBoolean()) {
|
||||
request.setMaxWriteBufferSize(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setMaxConcurrentReadBatches(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setMaxConcurrentWriteBatches(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setMaxOperationSizeInBytes(randomNonNegativeLong());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setRetryTimeout(TimeValue.timeValueMillis(500));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500));
|
||||
}
|
||||
assertTrue(client().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());
|
||||
|
||||
createIndex("logs-201901", leaderIndexSettings, "_doc");
|
||||
assertBusy(() -> {
|
||||
PersistentTasksCustomMetaData persistentTasksMetaData =
|
||||
client().admin().cluster().prepareState().get().getState().getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
|
||||
assertThat(persistentTasksMetaData, notNullValue());
|
||||
assertThat(persistentTasksMetaData.tasks().size(), equalTo(1));
|
||||
ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTasksMetaData.tasks().iterator().next().getParams();
|
||||
assertThat(shardFollowTask.getLeaderShardId().getIndexName(), equalTo("logs-201901"));
|
||||
assertThat(shardFollowTask.getFollowShardId().getIndexName(), equalTo("copy-logs-201901"));
|
||||
if (request.getMaxWriteBufferSize() != null) {
|
||||
assertThat(shardFollowTask.getMaxWriteBufferSize(), equalTo(request.getMaxWriteBufferSize()));
|
||||
}
|
||||
if (request.getMaxConcurrentReadBatches() != null) {
|
||||
assertThat(shardFollowTask.getMaxConcurrentReadBatches(), equalTo(request.getMaxConcurrentReadBatches()));
|
||||
}
|
||||
if (request.getMaxConcurrentWriteBatches() != null) {
|
||||
assertThat(shardFollowTask.getMaxConcurrentWriteBatches(), equalTo(request.getMaxConcurrentWriteBatches()));
|
||||
}
|
||||
if (request.getMaxBatchOperationCount() != null) {
|
||||
assertThat(shardFollowTask.getMaxBatchOperationCount(), equalTo(request.getMaxBatchOperationCount()));
|
||||
}
|
||||
if (request.getMaxOperationSizeInBytes() != null) {
|
||||
assertThat(shardFollowTask.getMaxBatchSizeInBytes(), equalTo(request.getMaxOperationSizeInBytes()));
|
||||
}
|
||||
if (request.getRetryTimeout() != null) {
|
||||
assertThat(shardFollowTask.getRetryTimeout(), equalTo(request.getRetryTimeout()));
|
||||
}
|
||||
if (request.getIdleShardRetryDelay() != null) {
|
||||
assertThat(shardFollowTask.getIdleShardRetryDelay(), equalTo(request.getIdleShardRetryDelay()));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void putAutoFollowPatterns(String... patterns) {
|
||||
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
|
||||
request.setLeaderClusterAlias("_local_");
|
||||
request.setLeaderIndexPatterns(Arrays.asList(patterns));
|
||||
// Need to set this, because following an index in the same cluster
|
||||
request.setFollowIndexNamePattern("copy-{{leader_index}}");
|
||||
assertTrue(client().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());
|
||||
}
|
||||
|
||||
private void deleteAutoFollowPatternSetting() {
|
||||
DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request();
|
||||
request.setLeaderClusterAlias("_local_");
|
||||
assertTrue(client().execute(DeleteAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,23 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.test.AbstractStreamableTestCase;

public class DeleteAutoFollowPatternRequestTests extends AbstractStreamableTestCase<DeleteAutoFollowPatternAction.Request> {

    @Override
    protected DeleteAutoFollowPatternAction.Request createBlankInstance() {
        return new DeleteAutoFollowPatternAction.Request();
    }

    @Override
    protected DeleteAutoFollowPatternAction.Request createTestInstance() {
        DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request();
        request.setLeaderClusterAlias(randomAlphaOfLength(4));
        return request;
    }
}

@@ -0,0 +1,63 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;

import java.io.IOException;
import java.util.Arrays;

public class PutAutoFollowPatternRequestTests extends AbstractStreamableXContentTestCase<PutAutoFollowPatternAction.Request> {

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }

    @Override
    protected PutAutoFollowPatternAction.Request doParseInstance(XContentParser parser) throws IOException {
        return PutAutoFollowPatternAction.Request.fromXContent(parser, null);
    }

    @Override
    protected PutAutoFollowPatternAction.Request createBlankInstance() {
        return new PutAutoFollowPatternAction.Request();
    }

    @Override
    protected PutAutoFollowPatternAction.Request createTestInstance() {
        PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
        request.setLeaderClusterAlias(randomAlphaOfLength(4));
        request.setLeaderIndexPatterns(Arrays.asList(generateRandomStringArray(4, 4, false)));
        if (randomBoolean()) {
            request.setFollowIndexNamePattern(randomAlphaOfLength(4));
        }
        if (randomBoolean()) {
            request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500));
        }
        if (randomBoolean()) {
            request.setRetryTimeout(TimeValue.timeValueMillis(500));
        }
        if (randomBoolean()) {
            request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.setMaxConcurrentReadBatches(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.setMaxConcurrentWriteBatches(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.setMaxOperationSizeInBytes(randomNonNegativeLong());
        }
        if (randomBoolean()) {
            request.setMaxWriteBufferSize(randomIntBetween(0, Integer.MAX_VALUE));
        }
        return request;
    }
}

@@ -0,0 +1,98 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction.Request;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase {

    public void testInnerDelete() {
        Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
        Map<String, AutoFollowMetadata.AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
        {
            List<String> existingPatterns = new ArrayList<>();
            existingPatterns.add("transactions-*");
            existingAutoFollowPatterns.put("eu_cluster",
                new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));

            List<String> existingUUIDS = new ArrayList<>();
            existingUUIDS.add("_val");
            existingAlreadyFollowedIndexUUIDS.put("eu_cluster", existingUUIDS);
        }
        {
            List<String> existingPatterns = new ArrayList<>();
            existingPatterns.add("logs-*");
            existingAutoFollowPatterns.put("asia_cluster",
                new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));

            List<String> existingUUIDS = new ArrayList<>();
            existingUUIDS.add("_val");
            existingAlreadyFollowedIndexUUIDS.put("asia_cluster", existingUUIDS);
        }
        ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,
                new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS)))
            .build();

        Request request = new Request();
        request.setLeaderClusterAlias("eu_cluster");
        AutoFollowMetadata result = TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState)
            .getMetaData()
            .custom(AutoFollowMetadata.TYPE);
        assertThat(result.getPatterns().size(), equalTo(1));
        assertThat(result.getPatterns().get("asia_cluster"), notNullValue());
        assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
        assertThat(result.getFollowedLeaderIndexUUIDs().get("asia_cluster"), notNullValue());
    }

    public void testInnerDeleteDoesNotExist() {
        Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
        Map<String, AutoFollowMetadata.AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
        {
            List<String> existingPatterns = new ArrayList<>();
            existingPatterns.add("transactions-*");
            existingAutoFollowPatterns.put("eu_cluster",
                new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));
        }
        ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,
                new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS)))
            .build();

        Request request = new Request();
        request.setLeaderClusterAlias("asia_cluster");
        Exception e = expectThrows(ResourceNotFoundException.class,
            () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState));
        assertThat(e.getMessage(), equalTo("no auto-follow patterns for cluster alias [asia_cluster] found"));
    }

    public void testInnerDeleteNoAutoFollowMetadata() {
        ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
            .metaData(MetaData.builder())
            .build();

        Request request = new Request();
        request.setLeaderClusterAlias("asia_cluster");
        Exception e = expectThrows(ResourceNotFoundException.class,
            () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState));
        assertThat(e.getMessage(), equalTo("no auto-follow patterns for cluster alias [asia_cluster] found"));
    }

}

@@ -0,0 +1,133 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ccr.action;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

public class TransportPutAutoFollowPatternActionTests extends ESTestCase {

    public void testInnerPut() {
        PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
        request.setLeaderClusterAlias("eu_cluster");
        request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));

        ClusterState localState = ClusterState.builder(new ClusterName("us_cluster"))
            .metaData(MetaData.builder())
            .build();

        ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster"))
            .metaData(MetaData.builder())
            .build();

        ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState);
        AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE);
        assertThat(autoFollowMetadata, notNullValue());
        assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(1));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
        assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
        assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(0));
    }

    public void testInnerPut_existingLeaderIndices() {
        PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
        request.setLeaderClusterAlias("eu_cluster");
        request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));

        ClusterState localState = ClusterState.builder(new ClusterName("us_cluster"))
            .metaData(MetaData.builder())
            .build();

        int numLeaderIndices = randomIntBetween(1, 8);
        int numMatchingLeaderIndices = randomIntBetween(1, 8);
        MetaData.Builder mdBuilder = MetaData.builder();
        for (int i = 0; i < numLeaderIndices; i++) {
            mdBuilder.put(IndexMetaData.builder("transactions-" + i)
                .settings(settings(Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0));
        }
        for (int i = 0; i < numMatchingLeaderIndices; i++) {
            mdBuilder.put(IndexMetaData.builder("logs-" + i)
                .settings(settings(Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0));
        }

        ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster"))
            .metaData(mdBuilder)
            .build();

        ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState);
        AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE);
        assertThat(autoFollowMetadata, notNullValue());
        assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(1));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
        assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
        assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(numMatchingLeaderIndices));
    }

    public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() {
        PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
        request.setLeaderClusterAlias("eu_cluster");
        request.setLeaderIndexPatterns(Arrays.asList("logs-*", "transactions-*"));

        Map<String, AutoFollowMetadata.AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
        List<String> existingPatterns = new ArrayList<>();
        existingPatterns.add("transactions-*");
        existingAutoFollowPatterns.put("eu_cluster",
            new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));
        Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
        List<String> existingUUIDS = new ArrayList<>();
        existingUUIDS.add("_val");
        existingAlreadyFollowedIndexUUIDS.put("eu_cluster", existingUUIDS);
        ClusterState localState = ClusterState.builder(new ClusterName("us_cluster"))
            .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,
                new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS)))
            .build();

        int numLeaderIndices = randomIntBetween(1, 8);
        MetaData.Builder mdBuilder = MetaData.builder();
        for (int i = 0; i < numLeaderIndices; i++) {
            mdBuilder.put(IndexMetaData.builder("logs-" + i)
                .settings(settings(Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0));
        }

        ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster"))
            .metaData(mdBuilder)
            .build();

        ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState);
        AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE);
        assertThat(autoFollowMetadata, notNullValue());
        assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(2));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
        assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(1), equalTo("transactions-*"));
        assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
        assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(numLeaderIndices + 1));
    }

}
@@ -38,6 +38,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.xpack.core.action.XPackInfoAction;
import org.elasticsearch.xpack.core.action.XPackUsageAction;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction;
import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage;
import org.elasticsearch.xpack.core.graph.action.GraphExploreAction;

@@ -395,6 +396,8 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin {
            new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new),
            new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new),
            new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new),
+           // ccr
+           new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new),
            // ILM
            new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE,
                IndexLifecycleFeatureSetUsage::new),

@@ -0,0 +1,357 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ccr;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;

import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * Custom metadata that contains auto follow patterns and what leader indices an auto follow pattern has already followed.
 */
public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> implements XPackPlugin.XPackMetaDataCustom {

    public static final String TYPE = "ccr_auto_follow";

    private static final ParseField PATTERNS_FIELD = new ParseField("patterns");
    private static final ParseField FOLLOWED_LEADER_INDICES_FIELD = new ParseField("followed_leader_indices");

    @SuppressWarnings("unchecked")
    private static final ConstructingObjectParser<AutoFollowMetadata, Void> PARSER = new ConstructingObjectParser<>("auto_follow",
        args -> new AutoFollowMetadata((Map<String, AutoFollowPattern>) args[0], (Map<String, List<String>>) args[1]));

    static {
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> {
            Map<String, AutoFollowPattern> patterns = new HashMap<>();
            String fieldName = null;
            for (XContentParser.Token token = p.nextToken(); token != XContentParser.Token.END_OBJECT; token = p.nextToken()) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    fieldName = p.currentName();
                } else if (token == XContentParser.Token.START_OBJECT) {
                    patterns.put(fieldName, AutoFollowPattern.PARSER.parse(p, c));
                } else {
                    throw new ElasticsearchParseException("unexpected token [" + token + "]");
                }
            }
            return patterns;
        }, PATTERNS_FIELD);
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> {
            Map<String, List<String>> alreadyFollowedIndexUUIDS = new HashMap<>();
            String fieldName = null;
            for (XContentParser.Token token = p.nextToken(); token != XContentParser.Token.END_OBJECT; token = p.nextToken()) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    fieldName = p.currentName();
                } else if (token == XContentParser.Token.START_ARRAY) {
                    alreadyFollowedIndexUUIDS.put(fieldName, Arrays.asList(XContentUtils.readStringArray(p, false)));
                } else {
                    throw new ElasticsearchParseException("unexpected token [" + token + "]");
                }
            }
            return alreadyFollowedIndexUUIDS;
        }, FOLLOWED_LEADER_INDICES_FIELD);
    }

    public static AutoFollowMetadata fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    private final Map<String, AutoFollowPattern> patterns;
    private final Map<String, List<String>> followedLeaderIndexUUIDs;

    public AutoFollowMetadata(Map<String, AutoFollowPattern> patterns, Map<String, List<String>> followedLeaderIndexUUIDs) {
        this.patterns = patterns;
        this.followedLeaderIndexUUIDs = followedLeaderIndexUUIDs;
    }

    public AutoFollowMetadata(StreamInput in) throws IOException {
        patterns = in.readMap(StreamInput::readString, AutoFollowPattern::new);
        followedLeaderIndexUUIDs = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
    }

    public Map<String, AutoFollowPattern> getPatterns() {
        return patterns;
    }

    public Map<String, List<String>> getFollowedLeaderIndexUUIDs() {
        return followedLeaderIndexUUIDs;
    }

    @Override
    public EnumSet<MetaData.XContentContext> context() {
        // TODO: When a snapshot is restored do we want to restore this?
        // (Otherwise we would start following indices automatically immediately)
        return MetaData.ALL_CONTEXTS;
    }

    @Override
    public String getWriteableName() {
        return TYPE;
    }

    @Override
    public Version getMinimalSupportedVersion() {
        return Version.V_6_5_0.minimumCompatibilityVersion();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeMap(patterns, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
        out.writeMapOfLists(followedLeaderIndexUUIDs, StreamOutput::writeString, StreamOutput::writeString);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(PATTERNS_FIELD.getPreferredName());
        for (Map.Entry<String, AutoFollowPattern> entry : patterns.entrySet()) {
            builder.startObject(entry.getKey());
            builder.value(entry.getValue());
            builder.endObject();
        }
        builder.endObject();

        builder.startObject(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName());
        for (Map.Entry<String, List<String>> entry : followedLeaderIndexUUIDs.entrySet()) {
            builder.field(entry.getKey(), entry.getValue());
        }
        builder.endObject();
        return builder;
    }

    @Override
    public boolean isFragment() {
        return true;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AutoFollowMetadata that = (AutoFollowMetadata) o;
        return Objects.equals(patterns, that.patterns);
    }

    @Override
    public int hashCode() {
        return Objects.hash(patterns);
    }

    public static class AutoFollowPattern implements Writeable, ToXContentObject {

        private static final ParseField LEADER_PATTERNS_FIELD = new ParseField("leader_patterns");
        private static final ParseField FOLLOW_PATTERN_FIELD = new ParseField("follow_pattern");
        public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count");
        public static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches");
        public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes");
        public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches");
        public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size");
        public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout");
        public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay");

        @SuppressWarnings("unchecked")
        private static final ConstructingObjectParser<AutoFollowPattern, Void> PARSER =
            new ConstructingObjectParser<>("auto_follow_pattern",
                args -> new AutoFollowPattern((List<String>) args[0], (String) args[1], (Integer) args[2], (Integer) args[3],
                    (Long) args[4], (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]));

        static {
            PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD);
            PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOW_PATTERN_FIELD);
            PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT);
            PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_READ_BATCHES);
            PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_SIZE_IN_BYTES);
            PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES);
            PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE);
            PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
                (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()),
                RETRY_TIMEOUT, ObjectParser.ValueType.STRING);
            PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
                (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()),
                IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING);
        }

        private final List<String> leaderIndexPatterns;
        private final String followIndexPattern;
        private final Integer maxBatchOperationCount;
        private final Integer maxConcurrentReadBatches;
        private final Long maxOperationSizeInBytes;
        private final Integer maxConcurrentWriteBatches;
        private final Integer maxWriteBufferSize;
        private final TimeValue retryTimeout;
        private final TimeValue idleShardRetryDelay;

        public AutoFollowPattern(List<String> leaderIndexPatterns, String followIndexPattern, Integer maxBatchOperationCount,
                                 Integer maxConcurrentReadBatches, Long maxOperationSizeInBytes, Integer maxConcurrentWriteBatches,
                                 Integer maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay) {
            this.leaderIndexPatterns = leaderIndexPatterns;
            this.followIndexPattern = followIndexPattern;
            this.maxBatchOperationCount = maxBatchOperationCount;
            this.maxConcurrentReadBatches = maxConcurrentReadBatches;
            this.maxOperationSizeInBytes = maxOperationSizeInBytes;
            this.maxConcurrentWriteBatches = maxConcurrentWriteBatches;
            this.maxWriteBufferSize = maxWriteBufferSize;
            this.retryTimeout = retryTimeout;
            this.idleShardRetryDelay = idleShardRetryDelay;
        }

        AutoFollowPattern(StreamInput in) throws IOException {
            leaderIndexPatterns = in.readList(StreamInput::readString);
            followIndexPattern = in.readOptionalString();
            maxBatchOperationCount = in.readOptionalVInt();
            maxConcurrentReadBatches = in.readOptionalVInt();
            maxOperationSizeInBytes = in.readOptionalLong();
            maxConcurrentWriteBatches = in.readOptionalVInt();
            maxWriteBufferSize = in.readOptionalVInt();
            retryTimeout = in.readOptionalTimeValue();
            idleShardRetryDelay = in.readOptionalTimeValue();
        }

        public boolean match(String indexName) {
            return match(leaderIndexPatterns, indexName);
        }

        public static boolean match(List<String> leaderIndexPatterns, String indexName) {
            return Regex.simpleMatch(leaderIndexPatterns, indexName);
        }

        public List<String> getLeaderIndexPatterns() {
            return leaderIndexPatterns;
        }

        public String getFollowIndexPattern() {
            return followIndexPattern;
        }

        public Integer getMaxBatchOperationCount() {
            return maxBatchOperationCount;
        }

        public Integer getMaxConcurrentReadBatches() {
            return maxConcurrentReadBatches;
        }

        public Long getMaxOperationSizeInBytes() {
            return maxOperationSizeInBytes;
        }

        public Integer getMaxConcurrentWriteBatches() {
            return maxConcurrentWriteBatches;
        }

        public Integer getMaxWriteBufferSize() {
            return maxWriteBufferSize;
        }

        public TimeValue getRetryTimeout() {
            return retryTimeout;
        }

        public TimeValue getIdleShardRetryDelay() {
            return idleShardRetryDelay;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeStringList(leaderIndexPatterns);
            out.writeOptionalString(followIndexPattern);
            out.writeOptionalVInt(maxBatchOperationCount);
            out.writeOptionalVInt(maxConcurrentReadBatches);
            out.writeOptionalLong(maxOperationSizeInBytes);
            out.writeOptionalVInt(maxConcurrentWriteBatches);
            out.writeOptionalVInt(maxWriteBufferSize);
            out.writeOptionalTimeValue(retryTimeout);
            out.writeOptionalTimeValue(idleShardRetryDelay);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.array(LEADER_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns.toArray(new String[0]));
            if (followIndexPattern != null) {
                builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexPattern);
            }
            if (maxBatchOperationCount != null) {
                builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount);
            }
            if (maxConcurrentReadBatches != null) {
                builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches);
            }
            if (maxOperationSizeInBytes != null) {
                builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes);
            }
            if (maxConcurrentWriteBatches != null) {
                builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches);
            }
            if (maxWriteBufferSize != null) {
                builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize);
            }
            if (retryTimeout != null) {
                builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout);
            }
            if (idleShardRetryDelay != null) {
                builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay);
            }
            return builder;
        }

        @Override
        public boolean isFragment() {
            return true;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            AutoFollowPattern that = (AutoFollowPattern) o;
            return Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) &&
                Objects.equals(followIndexPattern, that.followIndexPattern) &&
                Objects.equals(maxBatchOperationCount, that.maxBatchOperationCount) &&
                Objects.equals(maxConcurrentReadBatches, that.maxConcurrentReadBatches) &&
                Objects.equals(maxOperationSizeInBytes, that.maxOperationSizeInBytes) &&
                Objects.equals(maxConcurrentWriteBatches, that.maxConcurrentWriteBatches) &&
                Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) &&
                Objects.equals(retryTimeout, that.retryTimeout) &&
                Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay);
        }

        @Override
        public int hashCode() {
            return Objects.hash(
                leaderIndexPatterns,
                followIndexPattern,
                maxBatchOperationCount,
                maxConcurrentReadBatches,
                maxOperationSizeInBytes,
                maxConcurrentWriteBatches,
                maxWriteBufferSize,
                retryTimeout,
                idleShardRetryDelay
            );
        }
    }

}
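The `NamedWriteableRegistry.Entry` registered in `XPackClientPlugin` above is what lets a transport client deserialize the new `ccr_auto_follow` custom metadata: the stream carries the writeable name, and the registry maps it back to the `AutoFollowMetadata(StreamInput)` constructor. A minimal sketch of that round-trip; the registry construction and stream plumbing here are illustrative, not part of this commit:

[source,java]
--------------------------------
// Illustrative only: register the reader under the same category/name pair
// as the XPackClientPlugin entry above.
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.singletonList(
    new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new)));

// Serialize: writeNamedWriteable prefixes the payload with its writeable name
// ("ccr_auto_follow"), then writes the body via AutoFollowMetadata#writeTo.
AutoFollowMetadata original = new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap());
BytesStreamOutput out = new BytesStreamOutput();
out.writeNamedWriteable(original);

// Deserialize: the wrapping stream looks the name up in the registry and
// dispatches to the AutoFollowMetadata(StreamInput) constructor.
try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
    AutoFollowMetadata copy = in.readNamedWriteable(AutoFollowMetadata.class);
}
--------------------------------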
@@ -12,6 +12,7 @@ import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import io.netty.handler.ssl.SslHandler;
import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.CloseableChannel;
import org.elasticsearch.common.network.NetworkService;

@@ -19,6 +20,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.netty4.Netty4Transport;

@@ -27,7 +29,10 @@ import org.elasticsearch.xpack.core.security.transport.SSLExceptionHelper;
import org.elasticsearch.xpack.core.ssl.SSLConfiguration;
import org.elasticsearch.xpack.core.ssl.SSLService;

+import javax.net.ssl.SNIHostName;
+import javax.net.ssl.SNIServerName;
import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLParameters;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Collections;

@@ -106,8 +111,8 @@ public class SecurityNetty4Transport extends Netty4Transport {
    }

    @Override
-   protected ChannelHandler getClientChannelInitializer() {
-       return new SecurityClientChannelInitializer();
+   protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) {
+       return new SecurityClientChannelInitializer(node);
    }

    @Override

@@ -167,16 +172,28 @@ public class SecurityNetty4Transport extends Netty4Transport {
    private class SecurityClientChannelInitializer extends ClientChannelInitializer {

        private final boolean hostnameVerificationEnabled;
+       private final SNIHostName serverName;

-       SecurityClientChannelInitializer() {
+       SecurityClientChannelInitializer(DiscoveryNode node) {
            this.hostnameVerificationEnabled = sslEnabled && sslConfiguration.verificationMode().isHostnameVerificationEnabled();
+           String configuredServerName = node.getAttributes().get("server_name");
+           if (configuredServerName != null) {
+               try {
+                   serverName = new SNIHostName(configuredServerName);
+               } catch (IllegalArgumentException e) {
+                   throw new ConnectTransportException(node, "invalid DiscoveryNode server_name [" + configuredServerName + "]", e);
+               }
+           } else {
+               serverName = null;
+           }
        }

        @Override
        protected void initChannel(Channel ch) throws Exception {
            super.initChannel(ch);
            if (sslEnabled) {
-               ch.pipeline().addFirst(new ClientSslHandlerInitializer(sslConfiguration, sslService, hostnameVerificationEnabled));
+               ch.pipeline().addFirst(new ClientSslHandlerInitializer(sslConfiguration, sslService, hostnameVerificationEnabled,
+                   serverName));
            }
        }
    }

@@ -186,11 +203,14 @@ public class SecurityNetty4Transport extends Netty4Transport {
        private final boolean hostnameVerificationEnabled;
        private final SSLConfiguration sslConfiguration;
        private final SSLService sslService;
+       private final SNIServerName serverName;

-       private ClientSslHandlerInitializer(SSLConfiguration sslConfiguration, SSLService sslService, boolean hostnameVerificationEnabled) {
+       private ClientSslHandlerInitializer(SSLConfiguration sslConfiguration, SSLService sslService, boolean hostnameVerificationEnabled,
+                                           SNIServerName serverName) {
            this.sslConfiguration = sslConfiguration;
            this.hostnameVerificationEnabled = hostnameVerificationEnabled;
            this.sslService = sslService;
+           this.serverName = serverName;
        }

        @Override

@@ -207,6 +227,11 @@ public class SecurityNetty4Transport extends Netty4Transport {
            }

            sslEngine.setUseClientMode(true);
+           if (serverName != null) {
+               SSLParameters sslParameters = sslEngine.getSSLParameters();
+               sslParameters.setServerNames(Collections.singletonList(serverName));
+               sslEngine.setSSLParameters(sslParameters);
+           }
            ctx.pipeline().replace(this, "ssl", new SslHandler(sslEngine));
            super.connect(ctx, remoteAddress, localAddress, promise);
        }
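The net effect of the hunks above is standard JSSE SNI plumbing: the `server_name` node attribute is validated as an `SNIHostName` and attached to the client-side `SSLEngine` before the handshake. A standalone sketch of that wiring; host names are hypothetical and checked-exception handling is elided, so this is an illustration rather than the transport code itself:

[source,java]
--------------------------------
// Client-mode engine pointed at the proxy address (hypothetical host/port).
SSLEngine sslEngine = SSLContext.getDefault().createSSLEngine("proxy.example.com", 9300);
sslEngine.setUseClientMode(true);

// Request the virtual cluster behind the proxy. An invalid name makes the
// SNIHostName constructor throw IllegalArgumentException, which the transport
// above surfaces as a ConnectTransportException.
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setServerNames(Collections.singletonList(new SNIHostName("es-cluster.internal")));
sslEngine.setSSLParameters(sslParameters);
--------------------------------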
@@ -0,0 +1,53 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ccr;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;

public class AutoFollowMetadataTests extends AbstractSerializingTestCase<AutoFollowMetadata> {

    @Override
    protected Predicate<String> getRandomFieldsExcludeFilter() {
        return s -> true;
    }

    @Override
    protected AutoFollowMetadata doParseInstance(XContentParser parser) throws IOException {
        return AutoFollowMetadata.fromXContent(parser);
    }

    @Override
    protected AutoFollowMetadata createTestInstance() {
        int numEntries = randomIntBetween(0, 32);
        Map<String, AutoFollowMetadata.AutoFollowPattern> configs = new HashMap<>(numEntries);
        Map<String, List<String>> followedLeaderIndices = new HashMap<>(numEntries);
        for (int i = 0; i < numEntries; i++) {
            List<String> leaderPatterns = Arrays.asList(generateRandomStringArray(4, 4, false));
            AutoFollowMetadata.AutoFollowPattern autoFollowPattern =
                new AutoFollowMetadata.AutoFollowPattern(leaderPatterns, randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE),
                    randomIntBetween(0, Integer.MAX_VALUE), randomNonNegativeLong(), randomIntBetween(0, Integer.MAX_VALUE),
                    randomIntBetween(0, Integer.MAX_VALUE), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500));
            configs.put(Integer.toString(i), autoFollowPattern);
            followedLeaderIndices.put(Integer.toString(i), Arrays.asList(generateRandomStringArray(4, 4, false)));
        }
        return new AutoFollowMetadata(configs, followedLeaderIndices);
    }

    @Override
    protected Writeable.Reader<AutoFollowMetadata> instanceReader() {
        return AutoFollowMetadata::new;
    }
}
@ -183,7 +183,7 @@ public class AuthorizationServiceTests extends ESTestCase {
|
|||
rolesStore = mock(CompositeRolesStore.class);
|
||||
clusterService = mock(ClusterService.class);
|
||||
final Settings settings = Settings.builder()
|
||||
.put("search.remote.other_cluster.seeds", "localhost:9999")
|
||||
.put("cluster.remote.other_cluster.seeds", "localhost:9999")
|
||||
.build();
|
||||
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
|
||||
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
|
||||
|
|
|
@ -110,8 +110,8 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
|
|||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 2))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 1))
|
||||
.put("search.remote.remote.seeds", "127.0.0.1:" + randomIntBetween(9301, 9350))
|
||||
.put("search.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399))
|
||||
.put("cluster.remote.remote.seeds", "127.0.0.1:" + randomIntBetween(9301, 9350))
|
||||
.put("cluster.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399))
|
||||
.build();
|
||||
|
||||
indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
|
||||
|
|
|
@@ -0,0 +1,383 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.transport.netty4;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.ssl.SslHandler;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.common.socket.SocketAccess;
import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport;
import org.elasticsearch.xpack.core.ssl.SSLConfiguration;
import org.elasticsearch.xpack.core.ssl.SSLService;

import javax.net.SocketFactory;
import javax.net.ssl.HandshakeCompletedListener;
import javax.net.ssl.SNIHostName;
import javax.net.ssl.SNIMatcher;
import javax.net.ssl.SNIServerName;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;
import javax.net.ssl.SSLSocket;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.xpack.core.security.SecurityField.setting;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;

public class SimpleSecurityNetty4TransportTests extends AbstractSimpleTransportTestCase {

    private static final ConnectionProfile SINGLE_CHANNEL_PROFILE;

    static {
        ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
        builder.addConnections(1,
                TransportRequestOptions.Type.BULK,
                TransportRequestOptions.Type.PING,
                TransportRequestOptions.Type.RECOVERY,
                TransportRequestOptions.Type.REG,
                TransportRequestOptions.Type.STATE);
        SINGLE_CHANNEL_PROFILE = builder.build();
    }

    private SSLService createSSLService() {
        Path testnodeCert = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt");
        Path testnodeKey = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem");
        MockSecureSettings secureSettings = new MockSecureSettings();
        secureSettings.setString("xpack.ssl.secure_key_passphrase", "testnode");
        Settings settings = Settings.builder()
                .put("xpack.security.transport.ssl.enabled", true)
                .put("xpack.ssl.key", testnodeKey)
                .put("xpack.ssl.certificate", testnodeCert)
                .put("path.home", createTempDir())
                .setSecureSettings(secureSettings)
                .build();
        try {
            return new SSLService(settings, TestEnvironment.newEnvironment(settings));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version,
                                                    ClusterSettings clusterSettings, boolean doHandshake) {
        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
        NetworkService networkService = new NetworkService(Collections.emptyList());
        Settings settings1 = Settings.builder()
                .put(settings)
                .put("xpack.security.transport.ssl.enabled", true).build();
        Transport transport = new SecurityNetty4Transport(settings1, threadPool,
                networkService, BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry,
                new NoneCircuitBreakerService(), createSSLService()) {

            @Override
            protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException,
                    InterruptedException {
                if (doHandshake) {
                    return super.executeHandshake(node, channel, timeout);
                } else {
                    return version.minimumCompatibilityVersion();
                }
            }

            @Override
            protected Version getCurrentVersion() {
                return version;
            }

        };
        MockTransportService mockTransportService =
                MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings,
                        Collections.emptySet());
        mockTransportService.start();
        return mockTransportService;
    }

    @Override
    protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) {
        settings = Settings.builder().put(settings)
                .put(TcpTransport.PORT.getKey(), "0")
                .build();
        MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake);
        transportService.start();
        return transportService;
    }

    public void testConnectException() throws UnknownHostException {
        try {
            serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876),
                    emptyMap(), emptySet(), Version.CURRENT));
            fail("Expected ConnectTransportException");
        } catch (ConnectTransportException e) {
            assertThat(e.getMessage(), containsString("connect_exception"));
            assertThat(e.getMessage(), containsString("[127.0.0.1:9876]"));
            Throwable cause = e.getCause();
            assertThat(cause, instanceOf(IOException.class));
        }
    }

    public void testBindUnavailableAddress() {
        // this is on a lower level since it needs access to the TransportService before it's started
        int port = serviceA.boundAddress().publishAddress().getPort();
        Settings settings = Settings.builder()
                .put(Node.NODE_NAME_SETTING.getKey(), "foobar")
                .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
                .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
                .put("transport.tcp.port", port)
                .build();
        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> {
            MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true);
            try {
                transportService.start();
            } finally {
                transportService.stop();
                transportService.close();
            }
        });
        assertEquals("Failed to bind to [" + port + "]", bindTransportException.getMessage());
    }

    @SuppressForbidden(reason = "Need to open socket connection")
    public void testRenegotiation() throws Exception {
        SSLService sslService = createSSLService();
        final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration("xpack.ssl");
        SocketFactory factory = sslService.sslSocketFactory(sslConfiguration);
        try (SSLSocket socket = (SSLSocket) factory.createSocket()) {
            SocketAccess.doPrivileged(() -> socket.connect(serviceA.boundAddress().publishAddress().address()));

            CountDownLatch handshakeLatch = new CountDownLatch(1);
            HandshakeCompletedListener firstListener = event -> handshakeLatch.countDown();
            socket.addHandshakeCompletedListener(firstListener);
            socket.startHandshake();
            handshakeLatch.await();
            socket.removeHandshakeCompletedListener(firstListener);

            // 'E', 'S' followed by a length of -1 is the transport-level ping message.
            OutputStreamStreamOutput stream = new OutputStreamStreamOutput(socket.getOutputStream());
            stream.writeByte((byte) 'E');
            stream.writeByte((byte) 'S');
            stream.writeInt(-1);
            stream.flush();

            socket.startHandshake();
            CountDownLatch renegotiationLatch = new CountDownLatch(1);
            HandshakeCompletedListener secondListener = event -> renegotiationLatch.countDown();
            socket.addHandshakeCompletedListener(secondListener);

            AtomicReference<Exception> error = new AtomicReference<>();
            CountDownLatch catchReadErrorsLatch = new CountDownLatch(1);
            Thread renegotiationThread = new Thread(() -> {
                try {
                    socket.setSoTimeout(50);
                    socket.getInputStream().read();
                } catch (SocketTimeoutException e) {
                    // Ignore. We expect a timeout.
                } catch (IOException e) {
                    error.set(e);
                } finally {
                    catchReadErrorsLatch.countDown();
                }
            });
            renegotiationThread.start();
            renegotiationLatch.await();
            socket.removeHandshakeCompletedListener(secondListener);
            catchReadErrorsLatch.await();

            assertNull(error.get());

            // A second ping proves the connection still works after renegotiation.
            stream.writeByte((byte) 'E');
            stream.writeByte((byte) 'S');
            stream.writeInt(-1);
            stream.flush();
        }
    }

    // TODO: These tests currently rely on plaintext transports

    @Override
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285")
    public void testTcpHandshake() {
    }

    // TODO: These tests as configured do not currently work with the security transport

    @Override
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285")
    public void testTransportProfilesWithPortAndHost() {
    }

    public void testSNIServerNameIsPropagated() throws Exception {
        SSLService sslService = createSSLService();
        final ServerBootstrap serverBootstrap = new ServerBootstrap();
        boolean success = false;
        try {
            serverBootstrap.group(new NioEventLoopGroup(1));
            serverBootstrap.channel(NioServerSocketChannel.class);

            final String sniIp = "sni-hostname";
            final SNIHostName sniHostName = new SNIHostName(sniIp);
            final CountDownLatch latch = new CountDownLatch(2);
            serverBootstrap.childHandler(new ChannelInitializer<Channel>() {

                @Override
                protected void initChannel(Channel ch) {
                    SSLEngine serverEngine = sslService.createSSLEngine(sslService.getSSLConfiguration(setting("transport.ssl.")),
                            null, -1);
                    serverEngine.setUseClientMode(false);
                    SSLParameters sslParameters = serverEngine.getSSLParameters();
                    sslParameters.setSNIMatchers(Collections.singletonList(new SNIMatcher(0) {
                        @Override
                        public boolean matches(SNIServerName sniServerName) {
                            if (sniHostName.equals(sniServerName)) {
                                latch.countDown();
                                return true;
                            } else {
                                return false;
                            }
                        }
                    }));
                    serverEngine.setSSLParameters(sslParameters);
                    final SslHandler sslHandler = new SslHandler(serverEngine);
                    sslHandler.handshakeFuture().addListener(future -> latch.countDown());
                    ch.pipeline().addFirst("sslhandler", sslHandler);
                }
            });
            serverBootstrap.validate();
            ChannelFuture serverFuture = serverBootstrap.bind(getLocalEphemeral());
            serverFuture.await();
            InetSocketAddress serverAddress = (InetSocketAddress) serverFuture.channel().localAddress();

            try (MockTransportService serviceC = build(
                    Settings.builder()
                            .put("name", "TS_TEST")
                            .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
                            .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
                            .build(),
                    version0,
                    null, true)) {
                serviceC.acceptIncomingRequests();

                HashMap<String, String> attributes = new HashMap<>();
                attributes.put("server_name", sniIp);
                DiscoveryNode node = new DiscoveryNode("server_node_id", new TransportAddress(serverAddress), attributes,
                        EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);

                new Thread(() -> {
                    try {
                        serviceC.connectToNode(node, SINGLE_CHANNEL_PROFILE);
                    } catch (ConnectTransportException ex) {
                        // Ignore. The other side is not setup to do the ES handshake. So this will fail.
                    }
                }).start();

                latch.await();
                serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS);
                success = true;
            }
        } finally {
            if (success == false) {
                serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS);
            }
        }
    }

    public void testInvalidSNIServerName() throws Exception {
        SSLService sslService = createSSLService();
        final ServerBootstrap serverBootstrap = new ServerBootstrap();
        boolean success = false;
        try {
            serverBootstrap.group(new NioEventLoopGroup(1));
            serverBootstrap.channel(NioServerSocketChannel.class);

            final String sniIp = "invalid_hostname";
            serverBootstrap.childHandler(new ChannelInitializer<Channel>() {

                @Override
                protected void initChannel(Channel ch) {
                    SSLEngine serverEngine = sslService.createSSLEngine(sslService.getSSLConfiguration(setting("transport.ssl.")),
                            null, -1);
                    serverEngine.setUseClientMode(false);
                    final SslHandler sslHandler = new SslHandler(serverEngine);
                    ch.pipeline().addFirst("sslhandler", sslHandler);
                }
            });
            serverBootstrap.validate();
            ChannelFuture serverFuture = serverBootstrap.bind(getLocalEphemeral());
            serverFuture.await();
            InetSocketAddress serverAddress = (InetSocketAddress) serverFuture.channel().localAddress();

            try (MockTransportService serviceC = build(
                    Settings.builder()
                            .put("name", "TS_TEST")
                            .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
                            .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
                            .build(),
                    version0,
                    null, true)) {
                serviceC.acceptIncomingRequests();

                HashMap<String, String> attributes = new HashMap<>();
                attributes.put("server_name", sniIp);
                DiscoveryNode node = new DiscoveryNode("server_node_id", new TransportAddress(serverAddress), attributes,
                        EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);

                ConnectTransportException connectException = expectThrows(ConnectTransportException.class,
                        () -> serviceC.connectToNode(node, SINGLE_CHANNEL_PROFILE));

                assertThat(connectException.getMessage(), containsString("invalid DiscoveryNode server_name [invalid_hostname]"));

                serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS);
                success = true;
            }
        } finally {
            if (success == false) {
                serverBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS);
            }
        }
    }
}
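Note (annotation, not part of the diff): the two SNI tests above verify that a DiscoveryNode's server_name attribute ends up in the TLS ClientHello. A minimal client-side sketch of that propagation step using only JDK APIs; the SecurityNetty4Transport internals are replaced here by a plain SSLEngine, and the hostname is an assumption:

import java.util.Collections;

import javax.net.ssl.SNIHostName;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;

public class SniClientExample {
    public static void main(String[] args) throws Exception {
        SSLContext context = SSLContext.getDefault();
        SSLEngine engine = context.createSSLEngine();
        engine.setUseClientMode(true);

        // Put the desired server name into the handshake parameters so the
        // server's SNIMatcher (as in the test above) can observe and match it.
        SSLParameters parameters = engine.getSSLParameters();
        parameters.setServerNames(Collections.singletonList(new SNIHostName("sni-hostname")));
        engine.setSSLParameters(parameters);
    }
}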
@@ -208,7 +208,14 @@ public class SimpleSecurityNioTransportTests extends AbstractSimpleTransportTestCase {
     // TODO: These tests currently rely on plaintext transports

     @Override
-    @AwaitsFix(bugUrl = "")
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285")
     public void testTcpHandshake() throws IOException, InterruptedException {
     }
+
+    // TODO: These tests as configured do not currently work with the security transport
+
+    @Override
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33285")
+    public void testTransportProfilesWithPortAndHost() {
+    }
 }

@@ -0,0 +1,17 @@
{
  "ccr.delete_auto_follow_pattern": {
    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current",
    "methods": [ "DELETE" ],
    "url": {
      "path": "/_ccr/_auto_follow/{leader_cluster_alias}",
      "paths": [ "/_ccr/_auto_follow/{leader_cluster_alias}" ],
      "parts": {
        "leader_cluster_alias": {
          "type": "string",
          "required": true,
          "description": "The name of the leader cluster alias."
        }
      }
    }
  }
}
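Note (annotation, not part of the diff): a hedged usage sketch for the endpoint declared above, written against the 6.x low-level REST client; the RestClient wiring and the alias value are assumptions, not part of this commit:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteAutoFollowPatternExample {
    // Deletes the auto-follow pattern registered under the given leader cluster alias.
    static Response deleteAutoFollowPattern(RestClient client, String leaderClusterAlias) throws IOException {
        Request request = new Request("DELETE", "/_ccr/_auto_follow/" + leaderClusterAlias);
        return client.performRequest(request);
    }
}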
@@ -0,0 +1,21 @@
{
  "ccr.put_auto_follow_pattern": {
    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current",
    "methods": [ "PUT" ],
    "url": {
      "path": "/_ccr/_auto_follow/{leader_cluster_alias}",
      "paths": [ "/_ccr/_auto_follow/{leader_cluster_alias}" ],
      "parts": {
        "leader_cluster_alias": {
          "type": "string",
          "required": true,
          "description": "The name of the leader cluster alias."
        }
      }
    },
    "body": {
      "description" : "The specification of the auto follow pattern",
      "required" : true
    }
  }
}
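Note (annotation, not part of the diff): the matching PUT call, again sketched with the 6.x low-level REST client under the same assumptions; the leader_index_patterns body field mirrors the YAML test below:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class PutAutoFollowPatternExample {
    // Registers an auto-follow pattern so leader indices matching "logs-*"
    // are automatically followed on this cluster.
    static Response putAutoFollowPattern(RestClient client, String leaderClusterAlias) throws IOException {
        Request request = new Request("PUT", "/_ccr/_auto_follow/" + leaderClusterAlias);
        request.setJsonEntity("{ \"leader_index_patterns\": [\"logs-*\"] }");
        return client.performRequest(request);
    }
}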
@@ -0,0 +1,13 @@
---
"Test put and delete auto follow pattern":
  - do:
      ccr.put_auto_follow_pattern:
        leader_cluster_alias: _local_
        body:
          leader_index_patterns: ['logs-*']
  - is_true: acknowledged

  - do:
      ccr.delete_auto_follow_pattern:
        leader_cluster_alias: _local_
  - is_true: acknowledged
@@ -15,7 +15,7 @@ task remoteClusterTest(type: RestIntegTestTask) {
 remoteClusterTestCluster {
     numNodes = 2
     clusterName = 'remote-cluster'
-    setting 'search.remote.connect', false
+    setting 'cluster.remote.connect', false
     setting 'xpack.ilm.enabled', 'false'
     setting 'xpack.security.enabled', 'true'
     setting 'xpack.watcher.enabled', 'false'
@@ -61,9 +61,9 @@ mixedClusterTestCluster {
                 retries: 10)
         return tmpFile.exists()
     }
-    setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\""
-    setting 'search.remote.connections_per_cluster', 1
-    setting 'search.remote.connect', true
+    setting 'cluster.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\""
+    setting 'cluster.remote.connections_per_cluster', 1
+    setting 'cluster.remote.connect', true
 }

 mixedClusterTestRunner {
@@ -160,16 +160,16 @@ teardown:
       cluster.get_settings:
         include_defaults: true

-  - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip }
+  - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip }

   - do:
       cluster.put_settings:
         flat_settings: true
         body:
           transient:
-            search.remote.test_remote_cluster.seeds: $remote_ip
+            cluster.remote.test_remote_cluster.seeds: $remote_ip

-  - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}}
+  - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}}

   - do:
       headers: { Authorization: "Basic am9lOnMza3JpdA==" }
@@ -48,16 +48,16 @@ teardown:
       cluster.get_settings:
         include_defaults: true

-  - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip }
+  - set: { defaults.cluster.remote.my_remote_cluster.seeds.0: remote_ip }

   - do:
       cluster.put_settings:
         flat_settings: true
         body:
           transient:
-            search.remote.test_remote_cluster.seeds: $remote_ip
+            cluster.remote.test_remote_cluster.seeds: $remote_ip

-  - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}}
+  - match: {transient: {cluster.remote.test_remote_cluster.seeds: $remote_ip}}

   # we do another search here since this will enforce the connection to be established
   # otherwise the cluster might not have been connected yet.
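Note (annotation, not part of the diff): the comment above refers to issuing another cross-cluster search to force the remote connection to be established. A hedged sketch of such a connection-forcing request via the low-level REST client; the index name test_index and the client setup are assumptions:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class CrossClusterSearchExample {
    // Searching a remote-prefixed index makes the local cluster open
    // connections to the configured remote cluster's gateway nodes.
    static Response searchRemote(RestClient client) throws IOException {
        Request request = new Request("GET", "/my_remote_cluster:test_index/_search");
        return client.performRequest(request);
    }
}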