From f8e0557be5e8177382c9d1e36bf8dffa979e5446 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 23 Jul 2016 01:17:23 +0200 Subject: [PATCH 001/103] Extract AWS Key from KeyChain instead of using potential null value While I was working on #18703, I discovered a bad behavior when people don't provide AWS key/secret as part of their `elasticsearch.yml` but rely on SysProps or env. variables... In [`InternalAwsS3Service#getClient(...)`](https://github.com/elastic/elasticsearch/blob/d4366f8493ac8d2f7091404ffd346e4f3c0f9af9/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java#L76-L141), we have: ```java Tuple clientDescriptor = new Tuple<>(endpoint, account); AmazonS3Client client = clients.get(clientDescriptor); ``` But if people don't provide credentials, `account` is `null`. Even if it actually could work, I think that we should use the `AWSCredentialsProvider` we create later on and extract from it the `account` (AWS KEY actually) and then use it as the second value of the tuple. Closes #19557. 
--- .../elasticsearch/cloud/aws/AwsS3Service.java | 3 ++- .../cloud/aws/InternalAwsS3Service.java | 19 ++++++++++++---- .../repositories/s3/S3Repository.java | 5 +---- .../cloud/aws/AwsS3ServiceImplTests.java | 9 ++------ .../cloud/aws/TestAwsS3Service.java | 6 ++--- .../s3/AbstractS3SnapshotRestoreTest.java | 22 +++++++++---------- .../repositories/s3/S3RepositoryTests.java | 2 +- 7 files changed, 34 insertions(+), 32 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java index 43de8a3ba27..59c3d3445a2 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java @@ -24,6 +24,7 @@ import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; import java.util.Locale; import java.util.function.Function; @@ -154,6 +155,6 @@ public interface AwsS3Service extends LifecycleComponent { Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope); } - AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, + AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index 27053379db1..c4d8a63adc6 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -35,10 +35,13 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.s3.S3Repository; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.repositories.s3.S3Repository.getValue; + /** * */ @@ -54,17 +57,20 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements } @Override - public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String key, String secret, Integer maxRetries, + public synchronized AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) { String foundEndpoint = findEndpoint(logger, settings, endpoint, region); - Tuple clientDescriptor = new Tuple<>(foundEndpoint, key); + + AWSCredentialsProvider credentials = buildCredentials(logger, settings, repositorySettings); + + Tuple clientDescriptor = new Tuple<>(foundEndpoint, credentials.getCredentials().getAWSAccessKeyId()); AmazonS3Client client = clients.get(clientDescriptor); if (client != null) { return client; } client = new AmazonS3Client( - buildCredentials(logger, key, secret), + credentials, buildConfiguration(logger, settings, protocol, maxRetries, foundEndpoint, useThrottleRetries)); if (pathStyleAccess != null) { @@ -116,8 +122,13 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements return clientConfiguration; } - public static AWSCredentialsProvider buildCredentials(ESLogger logger, String key, String secret) { + public static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings, Settings repositorySettings) { AWSCredentialsProvider credentials; + 
String key = getValue(repositorySettings, settings, + S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); + String secret = getValue(repositorySettings, settings, + S3Repository.Repository.SECRET_SETTING, S3Repository.Repositories.SECRET_SETTING); + if (key.isEmpty() && secret.isEmpty()) { logger.debug("Using either environment variables, system properties or instance profile credentials"); credentials = new DefaultAWSCredentialsProviderChain(); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 56d05b65711..b5abb361be8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -301,11 +301,8 @@ public class S3Repository extends BlobStoreRepository { bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries, useThrottleRetries, cannedACL, storageClass, pathStyleAccess); - String key = getValue(metadata.settings(), settings, Repository.KEY_SETTING, Repositories.KEY_SETTING); - String secret = getValue(metadata.settings(), settings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING); - blobStore = new S3BlobStore(settings, - s3Service.client(endpoint, protocol, region, key, secret, maxRetries, useThrottleRetries, pathStyleAccess), + s3Service.client(metadata.settings(), endpoint, protocol, region, maxRetries, useThrottleRetries, pathStyleAccess), bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass); String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java 
b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java index 777bb5ff358..3f92a511190 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java @@ -35,7 +35,7 @@ import static org.hamcrest.Matchers.is; public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { - AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, "", ""); + AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, Settings.EMPTY, Settings.EMPTY); assertThat(credentialsProvider, instanceOf(DefaultAWSCredentialsProviderChain.class)); } @@ -136,12 +136,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, String expectedKey, String expectedSecret) { - String key = S3Repository.getValue(singleRepositorySettings, settings, - S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); - String secret = S3Repository.getValue(singleRepositorySettings, settings, - S3Repository.Repository.SECRET_SETTING, S3Repository.Repositories.SECRET_SETTING); - - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, key, secret).getCredentials(); + AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, settings, singleRepositorySettings).getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java index d7c706822a8..e7a57958a89 100644 --- 
a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java @@ -21,10 +21,8 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin; -import org.elasticsearch.plugins.Plugin; import java.util.IdentityHashMap; @@ -44,9 +42,9 @@ public class TestAwsS3Service extends InternalAwsS3Service { @Override - public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, + public synchronized AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) { - return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries, useThrottleRetries, pathStyleAccess)); + return cachedWrapper(super.client(repositorySettings, endpoint, protocol, region, maxRetries, useThrottleRetries, pathStyleAccess)); } private AmazonS3 cachedWrapper(AmazonS3 client) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index a3671b42ee4..d9d15ce0b3b 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -161,12 +161,15 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase public void testEncryption() { Client client = client(); logger.info("--> 
creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); + + Settings repositorySettings = Settings.builder() + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)) + .put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) + .build(); + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)) - .put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) - ).get(); + .setType("s3").setSettings(repositorySettings).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); @@ -193,11 +196,10 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client( + repositorySettings, null, S3Repository.Repositories.PROTOCOL_SETTING.get(settings), S3Repository.Repositories.REGION_SETTING.get(settings), - S3Repository.Repositories.KEY_SETTING.get(settings), - S3Repository.Repositories.SECRET_SETTING.get(settings), null, randomBoolean(), null); String bucketName = bucket.get("bucket"); @@ -466,15 +468,13 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase String endpoint = bucket.get("endpoint", S3Repository.Repositories.ENDPOINT_SETTING.get(settings)); Protocol protocol = S3Repository.Repositories.PROTOCOL_SETTING.get(settings); String region = bucket.get("region", 
S3Repository.Repositories.REGION_SETTING.get(settings)); - String accessKey = bucket.get("access_key", S3Repository.Repositories.KEY_SETTING.get(settings)); - String secretKey = bucket.get("secret_key", S3Repository.Repositories.SECRET_SETTING.get(settings)); String bucketName = bucket.get("bucket"); // We check that settings has been set in elasticsearch.yml integration test file // as described in README assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue()); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey, - null, randomBoolean(), null); + AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY, endpoint, protocol, region, null, + randomBoolean(), null); try { ObjectListing prevListing = null; //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index d6cca5d70d6..f8940c6158c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -58,7 +58,7 @@ public class S3RepositoryTests extends ESTestCase { @Override protected void doClose() {} @Override - public AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, + public AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) { return new DummyS3Client(); } From 849847078142a394e3386f6bd5cf929affd5a52a Mon Sep 17 00:00:00 2001 From: jaymode Date: Tue, 2 Aug 2016 13:45:41 -0400 Subject: 
[PATCH 002/103] update transport to allow for extensions --- .../transport/netty4/Netty4Transport.java | 64 +++++++++++++------ 1 file changed, 45 insertions(+), 19 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index c1b2ef10211..9dc3291693c 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -199,16 +199,7 @@ public class Netty4Transport extends TcpTransport { bootstrap.channel(NioSocketChannel.class); } - bootstrap.handler(new ChannelInitializer() { - - @Override - protected void initChannel(SocketChannel ch) throws Exception { - ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); - // using a dot as a prefix means this cannot come from any settings parsed - ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, ".client")); - } - - }); + bootstrap.handler(getClientChannelInitializer()); bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(connectTimeout.millis())); bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); @@ -292,14 +283,7 @@ public class Netty4Transport extends TcpTransport { serverBootstrap.channel(NioServerSocketChannel.class); } - serverBootstrap.childHandler(new ChannelInitializer() { - @Override - protected void initChannel(SocketChannel ch) throws Exception { - ch.pipeline().addLast("open_channels", Netty4Transport.this.serverOpenChannels); - ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); - ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name)); - } - }); + serverBootstrap.childHandler(getServerChannelInitializer(name, settings)); 
serverBootstrap.childOption(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings)); @@ -326,6 +310,14 @@ public class Netty4Transport extends TcpTransport { serverBootstraps.put(name, serverBootstrap); } + protected ChannelInitializer getServerChannelInitializer(String name, Settings settings) { + return new ServerChannelInitializer(name, settings); + } + + protected ChannelInitializer getClientChannelInitializer() { + return new ClientChannelInitializer(); + } + protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class); final Throwable t = unwrapped != null ? unwrapped : cause; @@ -348,7 +340,9 @@ public class Netty4Transport extends TcpTransport { Channel[] channels = new Channel[1]; channels[0] = connect.channel(); channels[0].closeFuture().addListener(new ChannelCloseListener(node)); - return new NodeChannels(channels, channels, channels, channels, channels); + NodeChannels nodeChannels = new NodeChannels(channels, channels, channels, channels, channels); + onAfterChannelsConnected(nodeChannels); + return nodeChannels; } protected NodeChannels connectToChannels(DiscoveryNode node) { @@ -409,6 +403,7 @@ public class Netty4Transport extends TcpTransport { } throw e; } + onAfterChannelsConnected(nodeChannels); success = true; } finally { if (success == false) { @@ -422,6 +417,9 @@ public class Netty4Transport extends TcpTransport { return nodeChannels; } + protected void onAfterChannelsConnected(NodeChannels nodeChannels) { + + } private class ChannelCloseListener implements ChannelFutureListener { private final DiscoveryNode node; @@ -503,4 +501,32 @@ public class Netty4Transport extends TcpTransport { }); } + protected class ClientChannelInitializer extends ChannelInitializer { + + @Override + protected void initChannel(SocketChannel ch) throws 
Exception { + ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); + // using a dot as a prefix means this cannot come from any settings parsed + ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, ".client")); + } + + } + + protected class ServerChannelInitializer extends ChannelInitializer { + + protected final String name; + protected final Settings settings; + + protected ServerChannelInitializer(String name, Settings settings) { + this.name = name; + this.settings = settings; + } + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + ch.pipeline().addLast("open_channels", Netty4Transport.this.serverOpenChannels); + ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); + ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name)); + } + } } From 00ca6c417ed171fe22756fa5e39bf6da05bfe170 Mon Sep 17 00:00:00 2001 From: jaymode Date: Tue, 2 Aug 2016 14:06:29 -0400 Subject: [PATCH 003/103] add javadocs --- .../elasticsearch/transport/netty4/Netty4Transport.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 9dc3291693c..d6ddc545783 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -417,9 +417,14 @@ public class Netty4Transport extends TcpTransport { return nodeChannels; } + /** + * Allows for logic to be executed after a connection has been made on all channels. While this method is being executed, the node is + * not listed as being connected to. 
+ * @param nodeChannels the {@link NodeChannels} that have been connected + */ protected void onAfterChannelsConnected(NodeChannels nodeChannels) { - } + private class ChannelCloseListener implements ChannelFutureListener { private final DiscoveryNode node; From 669daccfbb8f567ed77a69d4f3f1a348b8f7254d Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 2 Aug 2016 14:09:05 -0400 Subject: [PATCH 004/103] Allow for Netty 4 HTTP extensions This commit enables the Netty 4 HTTP server implementation to allow for extensions. --- .../elasticsearch/http/netty4/Netty4HttpServerTransport.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 0d4a6ab5ee1..f95d9534f7a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -525,12 +525,12 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem return new HttpChannelHandler(this, detailedErrorsEnabled, threadPool.getThreadContext()); } - static class HttpChannelHandler extends ChannelInitializer { + protected static class HttpChannelHandler extends ChannelInitializer { private final Netty4HttpServerTransport transport; private final Netty4HttpRequestHandler requestHandler; - HttpChannelHandler( + protected HttpChannelHandler( final Netty4HttpServerTransport transport, final boolean detailedErrorsEnabled, final ThreadContext threadContext) { From 6def10c5d9ee0747d42ee777e7ae27968fd69675 Mon Sep 17 00:00:00 2001 From: jaymode Date: Tue, 2 Aug 2016 14:46:44 -0400 Subject: [PATCH 005/103] make netty4 http request public --- .../java/org/elasticsearch/http/netty4/Netty4HttpRequest.java | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java index 7825e3ebe1c..2e511d15622 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java @@ -34,7 +34,7 @@ import java.net.SocketAddress; import java.util.HashMap; import java.util.Map; -class Netty4HttpRequest extends RestRequest { +public class Netty4HttpRequest extends RestRequest { private final FullHttpRequest request; private final Channel channel; From 0461e12663b73c17792a9942460dd65c5d3d45d0 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 2 Aug 2016 16:43:28 -0400 Subject: [PATCH 006/103] Simplify Netty 4 transport implementations The Netty 4 transport implementations have an unnecessary dependency on SocketChannels, and can instead just use plain Channels. 
--- .../http/netty4/Netty4HttpServerTransport.java | 4 ++-- .../transport/netty4/Netty4Transport.java | 14 ++++++++------ .../netty4/Netty4HttpServerPipeliningTests.java | 3 ++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index f95d9534f7a..7472d87209e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -525,7 +525,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem return new HttpChannelHandler(this, detailedErrorsEnabled, threadPool.getThreadContext()); } - protected static class HttpChannelHandler extends ChannelInitializer { + protected static class HttpChannelHandler extends ChannelInitializer { private final Netty4HttpServerTransport transport; private final Netty4HttpRequestHandler requestHandler; @@ -539,7 +539,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem } @Override - protected void initChannel(SocketChannel ch) throws Exception { + protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast("openChannels", transport.serverOpenChannels); final HttpRequestDecoder decoder = new HttpRequestDecoder( Math.toIntExact(transport.maxInitialLineLength.bytes()), diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index d6ddc545783..d7631acd6b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ 
b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -25,6 +25,7 @@ import io.netty.channel.AdaptiveRecvByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; @@ -310,11 +311,11 @@ public class Netty4Transport extends TcpTransport { serverBootstraps.put(name, serverBootstrap); } - protected ChannelInitializer getServerChannelInitializer(String name, Settings settings) { + protected ChannelHandler getServerChannelInitializer(String name, Settings settings) { return new ServerChannelInitializer(name, settings); } - protected ChannelInitializer getClientChannelInitializer() { + protected ChannelHandler getClientChannelInitializer() { return new ClientChannelInitializer(); } @@ -506,10 +507,10 @@ public class Netty4Transport extends TcpTransport { }); } - protected class ClientChannelInitializer extends ChannelInitializer { + protected class ClientChannelInitializer extends ChannelInitializer { @Override - protected void initChannel(SocketChannel ch) throws Exception { + protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); // using a dot as a prefix means this cannot come from any settings parsed ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, ".client")); @@ -517,7 +518,7 @@ public class Netty4Transport extends TcpTransport { } - protected class ServerChannelInitializer extends ChannelInitializer { + protected class ServerChannelInitializer extends ChannelInitializer { protected final String name; protected final Settings settings; @@ -528,10 +529,11 @@ public class Netty4Transport extends TcpTransport { } @Override - protected void initChannel(SocketChannel ch) 
throws Exception { + protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast("open_channels", Netty4Transport.this.serverOpenChannels); ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name)); } } + } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 4d94dc2ccaf..155bbe4bb5b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.http.netty4; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; @@ -179,7 +180,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase { } @Override - protected void initChannel(SocketChannel ch) throws Exception { + protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); ch.pipeline().replace("handler", "handler", new PossiblySlowUpstreamHandler(executorService)); } From 4e48154130aa43e8c7184afe9966524c0c56c6d1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 2 Aug 2016 16:54:44 -0700 Subject: [PATCH 007/103] Mappings: Fix detection of metadata fields in documents In 2.0, the ability to specify metadata fields like _routing and _ttl inside a document was removed. However, the ability to break through this restriction has lingered, and the check that enforced it is completely broken. This change fixes the check, and adds a parsing test. 
--- .../elasticsearch/index/mapper/DocumentParser.java | 9 ++++----- .../index/mapper/DocumentParserTests.java | 12 ++++++++++++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 4614021af0e..697c8e9e4ef 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -340,17 +340,13 @@ final class DocumentParser { return; } XContentParser parser = context.parser(); - - String currentFieldName = parser.currentName(); - if (atRoot && MapperService.isMetadataField(currentFieldName)) { - throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters."); - } XContentParser.Token token = parser.currentToken(); if (token == XContentParser.Token.VALUE_NULL) { // the object is null ("obj1" : null), simply bail return; } + String currentFieldName = parser.currentName(); if (token.isValue()) { throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value"); } @@ -384,6 +380,9 @@ final class DocumentParser { parseArray(context, mapper, currentFieldName); } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + if (MapperService.isMetadataField(context.path().pathAsText(currentFieldName))) { + throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. 
Use the index API request parameters."); + } } else if (token == XContentParser.Token.VALUE_NULL) { parseNullValue(context, mapper, currentFieldName); } else if (token == null) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 058b1bd5360..f46bfa3e098 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -862,4 +862,16 @@ public class DocumentParserTests extends ESSingleNodeTestCase { () -> mapper.parse("test", "type", "1", bytes)); assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); } + + public void testDocumentContainsMetadataField() throws Exception { + DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); + DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping)); + + BytesReference bytes = XContentFactory.jsonBuilder().startObject().field("_ttl", 0).endObject().bytes(); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> + mapper.parse("test", "type", "1", bytes) + ); + assertTrue(e.getMessage(), e.getMessage().contains("cannot be added inside a document")); + } } From 7bfe1bd628d22657410c9cf2fc9576f74b096a21 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 2 Aug 2016 17:03:21 -0700 Subject: [PATCH 008/103] Check inner field with metadata field name is ok --- .../org/elasticsearch/index/mapper/DocumentParserTests.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 
f46bfa3e098..632f2cef7fb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -873,5 +873,8 @@ public class DocumentParserTests extends ESSingleNodeTestCase { mapper.parse("test", "type", "1", bytes) ); assertTrue(e.getMessage(), e.getMessage().contains("cannot be added inside a document")); + + BytesReference bytes2 = XContentFactory.jsonBuilder().startObject().field("foo._ttl", 0).endObject().bytes(); + mapper.parse("test", "type", "1", bytes2); // parses without error } } From fe823c857b71fe41bcd1cba17c6c73eb15ed9b50 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 3 Aug 2016 00:32:47 -0700 Subject: [PATCH 009/103] Plugins: Add ScriptService to dependencies available for plugin components --- core/src/main/java/org/elasticsearch/node/Node.java | 3 ++- core/src/main/java/org/elasticsearch/plugins/Plugin.java | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 56bcdcf9c8d..54c0c344953 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -333,7 +333,8 @@ public class Node implements Closeable { modules.add(settingsModule); client = new NodeClient(settings, threadPool); Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() - .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService).stream()) + .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, + scriptModule.getScriptService()).stream()) .collect(Collectors.toList()); modules.add(b -> { b.bind(PluginsService.class).toInstance(pluginsService); diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java index 
03eb96a4397..9f468a3d195 100644 --- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.IndexModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -78,9 +79,10 @@ public abstract class Plugin { * @param clusterService A service to allow watching and updating cluster state * @param threadPool A service to allow retrieving an executor to run an async action * @param resourceWatcherService A service to watch for changes to node local files + * @param scriptService A service to allow running scripts on the local node */ public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService) { + ResourceWatcherService resourceWatcherService, ScriptService scriptService) { return Collections.emptyList(); } From 5b38282fcb0b8fb89792bf4e9e1be86ab1ede098 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Wed, 3 Aug 2016 15:37:38 +0200 Subject: [PATCH 010/103] fix bwc index tool for versions before 5.0 (#19626) * fix bwc index tool for versions before 5.0 --- dev-tools/create_bwc_index.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py index c96b66013b3..80d68a6e256 100644 --- a/dev-tools/create_bwc_index.py +++ b/dev-tools/create_bwc_index.py @@ -133,16 +133,19 @@ def start_node(version, release_dir, data_dir, repo_dir, tcp_port=DEFAULT_TRANSP logging.info('Starting node from %s on port %s/%s, data_dir %s' % (release_dir, tcp_port, http_port, data_dir)) if 
cluster_name is None: cluster_name = 'bwc_index_' + version - + if parse_version(version) < parse_version("5.0.0-alpha1"): + prefix = '-Des.' + else: + prefix = '-E' cmd = [ os.path.join(release_dir, 'bin/elasticsearch'), - '-Epath.data=%s' % data_dir, - '-Epath.logs=logs', - '-Ecluster.name=%s' % cluster_name, - '-Enetwork.host=localhost', - '-Etransport.tcp.port=%s' % tcp_port, - '-Ehttp.port=%s' % http_port, - '-Epath.repo=%s' % repo_dir + '%spath.data=%s' % (prefix, data_dir), + '%spath.logs=logs' % prefix, + '%scluster.name=%s' % (prefix, cluster_name), + '%snetwork.host=localhost' % prefix, + '%stransport.tcp.port=%s' % (prefix, tcp_port), + '%shttp.port=%s' % (prefix, http_port), + '%spath.repo=%s' % (prefix, repo_dir) ] if version.startswith('0.') or version.startswith('1.0.0.Beta') : cmd.append('-f') # version before 1.0 start in background automatically From 39081af9d6a95c09b05f381e0ecdf99288d0d5dd Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 3 Aug 2016 15:50:47 +0200 Subject: [PATCH 011/103] Added version 2.3.5 with bwc indices --- .../main/java/org/elasticsearch/Version.java | 4 ++++ .../test/resources/indices/bwc/index-2.3.5.zip | Bin 0 -> 106326 bytes .../test/resources/indices/bwc/repo-2.3.5.zip | Bin 0 -> 104147 bytes 3 files changed, 4 insertions(+) create mode 100644 core/src/test/resources/indices/bwc/index-2.3.5.zip create mode 100644 core/src/test/resources/indices/bwc/repo-2.3.5.zip diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index da876730b9a..a3e3b6ee3fb 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -71,6 +71,8 @@ public class Version { public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); public static final int V_2_3_4_ID = 2030499; public static final Version V_2_3_4 = new Version(V_2_3_4_ID, 
org.apache.lucene.util.Version.LUCENE_5_5_0); + public static final int V_2_3_5_ID = 2030599; + public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); public static final int V_5_0_0_alpha1_ID = 5000001; public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_alpha2_ID = 5000002; @@ -104,6 +106,8 @@ public class Version { return V_5_0_0_alpha2; case V_5_0_0_alpha1_ID: return V_5_0_0_alpha1; + case V_2_3_5_ID: + return V_2_3_5; case V_2_3_4_ID: return V_2_3_4; case V_2_3_3_ID: diff --git a/core/src/test/resources/indices/bwc/index-2.3.5.zip b/core/src/test/resources/indices/bwc/index-2.3.5.zip new file mode 100644 index 0000000000000000000000000000000000000000..ce8319ef0e6baca3b1bc49019e6d3f079d96f7d9 GIT binary patch literal 106326 zcmbrkWpEtPk|iu=ve>f4j4fHrEQ^^LEtV{1W@fgSnVF%*++ws^j245{vOd3g-^5P5 z*x8w#t%$6sA64g6=DjDY?#)z|gM~wZ`o}@+4=eL;7ythW9_llcxrwU@KtmlF3RdQ^ zP|M=4xp|^OLBl;jLqWme{=HH8KYbkU-yb*iFf+1tFt_kD`ohA_!ud})+5cO(PyZR5 zgQK~H%YPG;@?Tse{y&4V{u5T^zj{~Ze}Sd?uV9T_Tuodp{)zsNp#CR;{YQY2p#FhA zLuCv!{u}d-zXtekqFdTInwr=#bFjF${@V!tCqxEGy|S>PxM7soa7F_=vamX9=v-WX zsEec|C_dDtPh2T{F?YAj%32;V&JdOk4g-)wGY6MS+9V<2|3(oCo~3YVav&<_9rlQD znd@!75lsi{lR-E3vlE;S{XR3*cmIqm($F^cw^|XoSVdT<^I}0&G?!IUb5y9YwYZ$Mt?1Xl z{SSCCQ-3aO!l~B_rV+So=C0cUN8;^_SHr7%ocziQnih&CrXtsU)i8tGLIfPMkL0){ zW?&84Ze<|c&XCL){HIRQ*V{P{Dll#A%h5riA_TPp7H*qrm z2XX#iQ?dU~sQ$a^`d7dB{%2(WQFH$X2mh(Le{jBUpB$O}RiN)*llXt){5Q2Tv;Mmf z|H1wb`i<>&B8WYWacbAG=gB$BA0s}9Z)fpw3Mi!J@ zw5Z;o{moF;M$*^X`WuS0_QX;LM`Hm8JsVHuJgB0IhirbHbEHT=s`@c-^nieM|3wjL z9Qr?GYsP;`S-k%<2mfvA{4)Rp;?+ta10wkRZrXnVvvWp(wDiY-4bQC0$!o#Ox~* zo03E{=cSBk0{vrl1>GZtO~c~lM9RD44E3)ZTCcmOJJC$Pva5BU+FuMlGb04?MB8_* zDHwu~%wf>cswJt<&|ZXs;O%uW2EWtUAnokV;m>tMxn!zKj61NLB_I@95Bki=5nr^{f zgP9WFLB;T9>D<;)2TdC$@8eqW_2vbRA|GTLf4{*DbDJ;z(BLr@0c@&d-GKCz8s20M zkOAO$t@cs@fcaK{%J$f&WOqUE&tgS=Cb#k!YCt}X*II2}4`AI#W_#V#x*!r?&c9C^ 
zckHeaksXpy&FCzItyi?_Z`r)TUh;_Ic&AR9MhMY~R9^+LW4`tnFaDcAS zV#(%2qfz8kOk?y;M(8hDSnJO6gRwDsij=rMI1eE4vXbVcU5)8CXCv$cl^(0uCXKZp zRZWf+qQ8U8dFsxn!!Br_=z4X$>+H6R@2O8)u}J|jw$z>NXytM%Uz3M5>VH&4`QTQ@ z=d{$9rU%jas5};<9(Z#pG+qA$Moo(BjJRpp8XWp0Kj2h;$gS}F=uizQ+{(fAyv~>Q zQ*q6^keu6}w@)SeQT5SJ1=4qrSMlt`f(We3-icIH?r{c8DD~H_+>p7`^IHvCK#EdB zZed-GALW)~`qg>eDwN}-TzkMI<E>uu2kUe=(P$9c$!f2rYxqpD@DgU>qbq%k(BF~T8@+Aq|U zo2L9cX-BSc>}{)1&(wu!%=(qH1v}s$5$8Vj-V3*wfUkI8KYlaH6jpYNB5Wun+fYo> zb6s}Au!J}a&*fzR396WW(ewV2sf&oXG8Vlx^6YlBHY7YVWt7BLI%|R$CA+Zh-=~+Jy-O_S zavf2Jba@)2j97^$1yH!rl4d*fUs>j1;})a!((03U<8btkzz3WY`I)@9^)zA5cYme7 z_!{48XfKg)V%V1kCW!d-0LE5Wmrbfa_Or$T93D>tyS}9kDp|>H@HdF-TzM?a8n5Lrc9VQ%$uXsDN5V48}h?>Tmbn-8)9|RWu z9pq&>$!~onSM(Er$vBA0w%l9>P!x^Q`UeXmk5A6op;3kZOD4+(Yn$`g*lC%OZ=_{_ z$igQ=EW>*(4FQ1MV@8#R>6y^qct}6p9Upiv+GoKqu{rn}z)4o8L*WM$r^5)tD zywa$#r9wn!J2HGGAd+9(g9XLprva5w!!O#^FBWnIOJ5hf(tU!Ng*4&{p63tz{~(5$ zY`sZ1e_lqdpNCK_u4{iP8tT-)TYRG0`uYeedb>keQK|o=r6wfY3<1J+_Z{v zxmhFo1=@THhwmU_#MTDu4=$!|AIo$6CB#Qca1>)z`b`JP>lVFLAyfZ+iekLbwFsD? 
z0cY9do4qs6Rqd;dbKWt+r(5emZP|QQN~ln1x{^YPwxxb~ll|I-p9-7FqDQ}S%0uJ$ zM1}^d0QhnW36{zT&*ExYvyJfTIh@qo(HYm^yq%rt}3-#wz{6O%M;sMO>de)!y5L4 zPGNVyGLTjuf0vyK4+ab!_O1$aRSSWLWnnnwE%aVf>-0oqq#05%meVagHw+nBMaZv) z5H>A!)~4nE{C)WKWUL}=R!y(+&M_0B8)Yz$(P-e)m^){ClhPReT$Pw+OJ3z3tlguD z&AmzvCEMGTJkX%Vu}~>%$G_??VI9gp1;Nm$Nu#=n>8b(y8xdZX3M)f=>t!}bTQob`^jfuUuw5qtF=zR* z-vpi*T?lJd9M zL&Nx7Jv<;yd2M^mf)3866^)wBevE$tRz~q8&7bO_Y=R@|lFEY?L|vbW@i_Dz{7kWa zrno9&$SX=h@C{U2XH{u##Kav)c7h2o)1!4)9mMacbNt@v1fNXW#~d@Sem9wE9)Ym>aECJp|N#Fs`v)H=7jzbCm6vqhSJ_Jw4ZgIM*aG*cJO}@4@S*P2eb}tcvPE0%^1oM-) z@OD!}_`ORL5P_Ecc+wiw(uu3z#@=D4&Hb9}Tz2LFN6R#bW<`Y1@?tI0ZVT;l(g-(H znB-kXAXui3^Juz$rP^H_V=gp%4brkGv&CZDo;gkCQLsj(*yZ>%LE%?0ZI}id`Pm!Q zu5yXEHyYzo6DZLVC`3CRXD%G4v;fRyz$L?o{#NP#ST^f8AAGO4d=e#)vfnCT-N1rZ z)Cgc8Romh&Y={_e`&#jh@=6T>d|=_FpKiA(kspjdJ~)r1^l?7p)p|vD;H59y*2-UB zOSHMq`W>nswutoy-Q671MWoML@5C`-j~e=Q^!z7l+Upy;kl#|lHqGv-KFQ5!PwLd; zBq~&!?57XCp#)CVX9TF^t7ZjaFZ?M=E5Kj!>xLDZfC)c~E|oi0=@I97{Lyc)`^m*A z&2zQhov9@K_0t&zF*yjUtzR;hwGhlUDqoYgwyc=OzczW9D-vFWJQ5 z=3N$q(%pnE*$f|tKxo46%2mf5E?9%EadRgA{@E%Kgk>z<5>L_m3c^Ogt3Huc`vulS z+)RyHj34z0>oM9q5CYvQL<{u_s?W*iJ(S^S6tL!opyoS&p2z9y#x7Wj$4gjV!RP&ocsjv1%CSF*%Xrm++U_LA9bUowm z8lO)y;~>9?A->ywu~E7)%j4Y3qhT|VE;=}{X#EcL*Ql=XMj5)A#XP|8fDp0W56V7~ zHoHY!(YbDb-_k?lcs_AWRw=(q%oxW5#Orym4Et8F2~Iu5+bg)1%sO4l5p}5jx5=a4 zYzXP5=FxNgpF7C?K_;9{pKP!8JKk39UXjvj(|(MZ;E1i}JAAk8S|TmArJl6`7rI9Y z3T5B_TGi#nUD_Q`VB@}iO=1vEc{y#c|7FQ`!f&S(sIF5HvTNZ&ZW!=eCoadW5sw38 zT^xnihq&o9P8LL_xt{o?+HQ4egrPUy&#`6e%?2FF(sM#=5l(Q~c8#1X`V5D)vJ@Lc zmuuD`M=hj=M`}yMG|RFahL_n>x|&j~;FG&Fex>R)Es??5H1^{~d&X(uw0}L~oy?6v2f!qJZ=p2awQa zV7jYAsB>27Ej0BwBHWIr3l#em_7wr5u9$PJ)E&H3wm!tGg=uA&r2O)l*uZx)h4u3V zd)fx-bSvjgbJs|oRXZTfXDAH;d33cM!PfEDycs5gFR=(ZoUdqsg0Sm3UF`^VSI=E%eg1@1T4fOIUh8QDM7!XFFxt^Pm1vLp7moy0FVEfsAn3H$oqAQ4{`!p4`iU;3 z{DQvIC}kA5z^P2(8Md9-P7*jY$CB#9o=7;lccntE)Tdv3ku1$RYD_*+eI`9W zHsx7Hgw#PSt)PyGWGkq;Kv6@nrPg9Q_wo^b!W^vf0g6}GQsGg&>oUKSVKTlrbzfNR zrN67heOX7eGO`vr8t`Yi(m|#?x)?X?tIyszDaMWJ6D3lMl`sPZa5!qs^&q>o1+RQ? 
z7-?WIhSoa6HVl3NwRC;j2Ao3)AOibyjrsYkGjUs7r^HzTIVPfW!N+wS(ViHGf>SoS z6u0E(APTde#f%75Wk*keU(yk*zb;nrs6O0wG3=}ydUuL8(N1hrhpO2{vKzC8S~842 z^Vk^H`B>qZnN+c)$Mzu&jWJqQf;n~CjtR+<`sgS+sGxe4F`9(wJN)m{uMSk;R}G0O%>urH7jmEcqQd9+8F#D5rB$Kr(2aUv-63HE9a-hD25W;hS6zDW)ayG*SZ%@%Fs7s`&y}*haQ0#ku?SCw^qQstn<% z0nR4^O{%(!SBg zBkb4mgjqMLP$1%EHnuz2Yelav6~Ld9ObHEYmJrZ|cnyNw0PoCoG^zy~y4iIM5y$%; zgRl&eWVLVBW3g;8L@LfmLX3c!bpl$1T?3$379|XA1j=>mnUTVXq3M~%uNP?}B6_%a zQouxr&d)W0TVM@0PR;qdCaM)1vZMIjGC-m-d{3#o7s`bdSt`2y8|b*~ea{c_HmK93 z83(s=eWYA>JrY~vl8m(&GeOTz{(b9j`bYOMQg7zJV?EW2g6+{ z=>wwT$h`D20@?5t5|4uL`{8H?@KsxpiPc)a;xB+VuQ69R9sDWhdBrgVO{SpG&|68eJV_8lyU{)4boDH10qVY2Lx8 zMvM<(dn#oF!X?i@m9mb-a4~YwwMmcIMM^Do4iNh8kr7;g|9y424Ia(fA4D-TZL$Ja z#w&zTy#KlVn~Cw#K2WukjvHz(VE}2AWsLvZ<&(j)ioUR{X}->Vj|wzVeYH?YA=JJ5 za1l7e<2IdWsp&2H;y$yde>=f2Y5l=G=6-B~DmMUl)U5u>nR`jSta>CgMdnEL3WbW( zLz`8T;z7N>i{hMnfZt|9n6YnhLrk|pxM?V)8R=mcMQjbm&keoFIrC?mHD=`>z&XW0 zdCz7P8YPQ0N(yoyyZ|^m*>u}|A`*1_eU5(<*EO=bM=ywTeQ;5I({AsihZw~1?z&m;kU1}3DG@f2nn}E;-ci@T1^*|9Xxonv8~;e? 
zPF+i{kl8=>u}IUOrGMR`Ie+%w#CAbP9!_{$XfX`&y3&P=V>Bkq@Q2P=eoSmr(cix8 zvo*63srhcJRQdRzc?0k8;%qP=JA^OXBWcA7Q-_d~9zH6(GVU=DZ`EjGmyxeGL;q7<)ht9Rgz*7lp|74ddbY zNVC5q6)xxyG6SAtZ&YIBvkmCkg*c{1&VM>FKH1#ZWg_tA?3Mnu$#>vDr+IGZnZY2` za>KPM8_wT3V_#vKpeUWmi}39?M+VdCI>u|7W$nr*AVIq4oCD4rr!ixkrkCvuV5RHf zw;%<}j^ET@2pL$Sd2N8o za%IWqrxO-rlz|wXQF+@WXkq={Hv|iF94N>_-EYzPiF4L7(H8qkFdr3+zK~pp_WuE> zKo8<8QNwN8%1A1J(v$R1^tplp(JsP9J<3B0-Cr!j8EykCDkvbw+6Ck9IATB@3Z-0{ zL98qqdps_WDpm%0OOd*Ak?@{OaurfnlgMN_6b@0*>hDA!9+aa3J~fbg7|^0CC)%^J z(2PGIh1eHup)=hz|E)-&IdJ6o$>4M!HXQmwDkIX;`oCgNs;$VdND|y+dz5X9@W4`= zPj`GSZzs2PFViNKz>i%Ygn2)SOVP6M=~~JVPei8*5d68~&=6%vkM<$FgWgu;bg+}{{COkEs_c%4Fi`?hfGr`sEFvGE zx~0U4MJbiGGg@6*T0(phQSrGZbpoqF(#kl!)<;`2ajK{M1B8W z2U$QFozC+VMjt|2F<0(`x6d%%#578~p@Y7VwCRv@1Ejxw?pUqYj_deT!?|*wLhVWK zQQ2|KkoTAHYe5#c)`zgX;_ft#k?D$}piDJAoD*=eiI528nyRGPKzzX|tYdL#g++TE+Y(mvFDD%zJ@?Kh`gsiQ7LCM*b1mR* zaKbx%R#GDZ)Le{`ftKk^ZCdwIZRJN-^id8t)szKZ<^lUqdWEr_eFSm^~ zep3(oxizOA_clR>7TEBv7()m(IGVuTOlwfr`GguQMaNg2lvj7HLktej9yxOHDcVZe zvF{r0%C}!y8{@Ps+RC80>&*BZMbMkC32>N~RxvE|;eh4BX%I3=WWV(&<3}ph>tIUL_5_NSowww^ix`0rVPSHNbQ+u_%rKR$UXU( zRVuf&VQ1^z;KY& zCaTFtS5fc*sH)RfduCe(|J{CqOW7G{lg5A!6(@3J(|JFDCih;85d>WKsEZ{H0>BYtU`FM zv$449{kjjh6XtwR%WZ<>54$t(1;lQanoudhXR15#LrFKwY~cbZ<+GrRvdGa(314V{ zUD3kR`7>|?&7D3JdFK7}Hq+;&Pa zpm=3Gl@7x7U*Fs+X8}eX4qv{!i`YzHYOPtzIx5yxgHrpDVh;!cg}kh4^3#)xA6`fV4bS`-I64uOy`nhAz+N-lJB%yWBoUcJ`|+ddqvKH z1J+LqVV*1gVY88);a8HY-qbqH1vS{raaNp%4ljokf`O%IEqWB4G4~8TI1lvSxPK44 z1yRJak;_aFkj%05xW-*Z5NNWE;z?bSfZ&g466g<9LcJgY$ZW#B+TT{!4w$0~km)qP z6fEt7@UWH3jt&d=#$Ih6u2t7JY96SJnzx1NL##g&fgSOg2B5?G+e!fjQzwjfAUwb; zB@^EadXd(`9(C3WzlJI;($jEUTK518Kk@2I81PR?Cc(s;jtfCNQ3ctQY zpF=kyb51(Hg5>MH5RCkhH`g)-hsvzK)v`gvy_f~_UnY9>lYS=m@t439H~;j(I&%6e z0h4P|I{)xh4w6(pBAa`auwMlY@0CfrCwQ~JY;KBgzVHtl114s9Ld+a!P9V=NF?k3- zZuwJ!5k)1W#-9S6o_N48EHT8mppP7M%34fIKsseP4y)_0p++JJ-w?GE-~2uxEgZH+ zqky_a7IZjfucszakuxb|uhfB0d{lqP55`Yq(}F+Ip7?iRY;zm0VKT;5s2xt}zS*@B zWM0Ek_+aFB59u`-3&r(po@rwx1Ay`dm 
zeq?D5j#R12=s^fVG-q73l`8fu@nf~?qk;82%Tf(G8ddqs4Q-pgj&E=vFzce2$9EIM zhve`r0+swB`*WzRzJQ4nS%U=W;V_J;*2Y1S&C?b;?#)+3v1U{OZe!06qn$!1$za#p zWHdq+tID~Gi2AdOkm$O}b_3!5#Gwo>i_+L~E`!p7%4@FMwDS8{2wqM#C~^6%|&W;C^ft zK$GuZ8@8b3;qrQ`UwyVG_J4#i^YGBP3IyRrue>cK!%5jIan>of3>ApkVV z$(WuLak_1N5LzW5Z`t(739A2;_x86)=kvGChO`Dguc)W;blGSPM#=47@zaf!U~B9T zEpDY(Zlk4s;N9(gzULV#cg(vWTklY1?Clt$>B$TO$g09B*@r&agZe8fQA%=h99exi zb|`aFS0M%B7gJ2>M?+_5Q4zc-0r+ejl*-WX1TmNj*TxayAzq>#Tl3Qy>sg?mUOLXo+k2o?pK zpg0@ZcD5xQEcJyPX^kfJ$dAH{q_Ep1ovPK5ymhM1#lVzo9Q z00#Zr2ZuushOoDVFNp^YE_bBoB}Jzd)rBny>Lwhrc9^+Dd7O0BAf?rT+eVHS*JHqx zpK&=$nLkl21H(Qt-RyOnO1EMQVr*oqhQws%OsOq*A(irn`|%1 zc=M9|ku=ENR!kgfIHb}2R;z}N6Bym18TRoO{;of4Yt2%#%NcnP>t1UYDddWqK2>j)Np zoNAqk@Vu&aLNf-w58R~4eBoCmPAc!IrWp;gb_fn0o-=Gxq<9j+st3N#HsQzY7DNe^ zM=$d`o*zId#D^lkF-~0|8>MXLepC-eQ~dJ#?tATwk_Y6LSdcD3)vae_dgXbuiwyDp zh68CB)4qO0P0(I_L79>6Pj?z>rrCun@HdW>+H==MsW50np>D*zRKDy+XXgi9_fwYX zvHPT}NrA%DgH?;)YVt*wZ2iGELf)@cxZ~Y(BR)b&jhNAFFsvTF5^}9@LIUTuEo)i*>IG-pLs4vc1R1@e zCd8_9s?S>EW6CjG-vS=08?p;h_6HP>&-WulaRYV}`gjRW60W-jE0ox)!c zXm+V`{g$kIDYzH8UH!@CpTuRh494ITGrGV7D{bDq$)b0iz<`ZQT3h-AkWGo@N62`W zQeWGF6%If!tktbV){DSXRZV{z3+&@I$sUM@YJ6t>p+8JMRv7xAG-~j?s^bIf$`zN- zEh(qx&ea%|0|GtLa~QDyDmY=?Wx7wT)?od5*uc#$rh7a?jzvT|;_drAbS;*mj``0{ zR)Q|ubNw62995|`o#0C*{>H&Mi{MsSu)l2DFiq%1MfOO#yDEuJ`x5Fxy6=Xb!cqW@l=zJdX$n#t9!9f>=mqJeGmbdQ)J4y(i*= z0mH5iKEcG7vX56tMSrpb1B_$vHW%wVwm{*8b5_)ZOrHWBP;^|v14})-ik|2PDglTMg5ru){aKsM=Nf3n#9OwDszFuvvlJ zA5Xv0{VJrRCB-$ajF@LYU`4mkIFf1#1-pYv)2}U}YKP-RJ`@9NSd!=tog1XngWCf& zQkgk}(Bx5$4VW;?%#~|Xm?o>UoudR2hy5!B__5?uzhfPzGQnFAUox%YShYdJZ+Kf~ znZX>VmZe?F|IYayb;8_<#Bo<;SC@tmeZ*+~dYyYfKFP2;N}%w=lB_ti2I6E9*)jk9 zTO};~VR!k@K)x2?(}p5su3%r>Pm! 
zbmP>>(k5U00v&~3Mi*I@c?zuXZ?B0NEQa^XE=;!lB~DrBKfI}1G@yB;`hcF*!-met zEzW8d5vLe7y!e+KL=}LN9hWf;deC}VE8el1h_`S@!|UPd2?S?}X4{!h{r|!akeUc@kqy{E7H#UjSwjg`rCUjSCjO)o?_^`>wAs*1a=rEQq*s}gj@HMH|>nm zRKBb^wyhX*XMyFhV@Vu?j9@ouCqw(qDP!fi@Gx%Jfj4w!y4ezK#~X!O6ydUc+_oIW z+!uv7OnNM9vc+bk@G$}tj3#)C5cSJ^>lGYXBkkBAqL8X*qL2*4P27RX+-?9klt2)m$(`uIbX=r7??s>2~<~L+>YQ?~!;XCia_S=FV{eoe885 ztWu$ywjMyaAL~Da=VbiEm9$josh0CCl0QoxIRJhUHXBjNM8j&tCz!fACaTRWN<GZr-K{&n>a=4DzrI_m`RcpQCwU40 ztKr65Mt!Yz6W{~3qv|@T^uU!3&ZOruNVZ@D@p5$afYw7cQ~>rr4E6Z7CfQlrX%Tmt z10xvG+tupp-3H8tz6tagYSe8i#b_*&`@Qk`&eewZDl7b_`gF*`)@^Di5r-8-yLvdU zt+H-N@I;2V=bf-N5IXj8Ht!$B3PM3K1+cP~NC}2it84x@C4=>#wFj0_EhUs%cly}= zRf{%D+ESOopkTZf2a2KsqBg;`81O6e{2lZBtk$Mzi;gWkn|Q6G^UECbnPo;(kuNC3 z3MYXNyVomrJeI|p`Seq7Ax30|?9F z#nUtvGfde>&5(Zai)BWl-Vj>7E~3|OX_+)lg}-|4i=OZ<6RU*BeL9~S0(}N#jgOec zn+GU<)Q@Ab+N;+@HN)?ivB`F;UPlk!iNNkSyWQp4;wJj@DGeW8--L?HaB zc?mV+H;eV;jjfGksXabYqoN^IxrUDoz3FQsKHf&fSMYwfTjd*%)#ej2w7*`@MDnpk>>Hh3Un%U|fT^4vTaXOy_l6&Sd@ouX&k00!wF)!} z84No|*zcs(7$+746SEpWjHqDcU?LUKa?BK1j$CCZIHsighc=)jF6jV)aIG{svvEH_a9Ycp9;k~Y8z z^mEtxkk@P#?#NIzU|paq?1`ZW^C^IZz2Pkj_}Y&5^}h(s5qKall^_&=AWNBpu@%G=PDVQ6Gdb}qG>Ed&E|pLHNX}^j z1|MRQScYS9ORSq}Ib)Q9C@!lxXZrbp5FV{M5wkpWec?>H>_H^Ufa!JNgN>keT8g+iwK9M9N_5w*u6k;$Cfq#ZmZtM`ee!YWD?DzXnU#da-Q2K(`&0 z)6WMZraRFop);b|hDiN7FkL64+Xvx3Io&I9Q|Xzn$%#5;c6NMdl+CzgC`L=!Z>OZ+ z%T#@^D~N6vwD&wz$R^z_fj39t&bAu2Zrt;ud5w;{;~3A$cx%VQm7|{M>vmRDo=S-} zl26Xk;{5FBZJ~fViD`cer7JEA3p<;TullTWvawrT){KvNWQF@wF{Y%P*#`alI6!#< z55FKFuCD~S^hUv+s@IzcxJZOdB{b5XX)oI=`4SiLKIbwQ0c0B-7;{kD$Al>EC)88vZCU)O)WdkK#e7ffbaH6o*Mpj#&VcD z5MS(u4wMZkODEkfK!&zqNKtHcE*tE!)H+#_n+KaI_;)kZHI{ncXdY4stlxkc%!wpi z(3=4<4r=sCk^NMw#Td&%ctgZ!T4I|LxePh$Vmqs$zNH2O9SolDpTpz-}*Zz zgAWLL6>&10n>Q5Wzf7R&kIhrCMKuqgc*-ZO^=11dZNXAb_sZ33ekrRqmIpPp6f#`U zTvcSfS@4xRfBb=<(Y<9+vOwyxHDB?ikHqDokK@%3^}TAQzhw=YNCnd>y1!PP|uPW0{Kbww7+fNmaJl=2n`aq_)!!&SCCt%Y)x;#BN@?D&4nmFDYP!x z&TZ?eWc^YR%>1W9%)|rKsdyJ{YuvD0L*`Ifxa**GBLWexRLzY;^ZS{j4_doqFivi> 
zc8aj7ltCk?fK3x}{oBKS?;f;nlr*!!Zx0dFqq-!dsO2nH$s`Ks$6+PkgomyF=~2Hm z{7COn&=FMR$bJK+D?{>o8D7wYZ4SES@aq{M6VAx;lY9}_&!k{3HoqaF|Cu4x?I|J8 z^4R#zfQehSVvZI)N_Z|fIDIA6NP8_ylen?VCS_Dm1FNWGlMmVoj{mcM^@wVhbiL}L z&!ob3>d%J#69&m`XN*cwZ1K~nV6!i25~z7bkBk?m?%hVGEMoE-37mhT^#Lra)a*f#u41C}a2kIT z8B-VA82gfZjhlC!E?Gr?Xe24amNiQ}!+S^UCX*l5){-;)r zS<~Sz=^)IgU=^OrZLE-6;1O7zVcf)Op=T0@2#=UbXw$Ciz)Le8D{_*X1*y zAIAL#4@;mtXO_j;V<+h5rF3eJHN?4+#hUd1Bg~jS?BvN@?pF(`=4;XI*bia(f;Q+O zOF`(z@?6LTMx#o`+73F`$2)LGqi^_#3!=-EyU-;rszsDhXWCoIDOAmz*lHjB-9&?f z7Q-V&r+ZSteJ4(#jiP$(25?BpB-7ENwbCV3st3<5rCi}d7Pej?Pl~xE+y1UCUWB!( zAn7W&pxN~b_=#$vPR3NByT(5CN3BL9V2m&!m29k893#KvcG%u2xhk^P=_^5wZxDq8 zCKX$)fr@ar`i|luDSKlGVBN#&LblW9xt>HgpiEQA$cC*=tsI0pd!((`RLN53c$FL9 z-?i_N55wP^e=p>>eVxUW)yn-+%|}Q6HhmSpgEv4W0zxYwC>9J-zWk-BN8HO^mm}Lf z-4UPMxMR?WrpM-AtZWLHM;%+(BiPLG(Sq3C1LEJ)nud;qCEv5H`5aR3v)=mY&PhW! zGCWF^3GtXC1KE2I;y2=yZ4sC@J|<_(CL(|mldGTIb_-c` z`I5U2CG{wrWJ=~8=tsxzC)-!#^ntS~zs1llf~bb4guVuKAmeSWBkNTfLj=WG$^Jcl#GrF|IISK5Q){&P;!I3)?rUt~z~=)hG=4b0-XUo`yP>ao zYulo01AhL4fRmt`3O70)qTb$F`Fg`NbJgTT;T?CFhLB(1kaWptLf{xE@WZA9iw;9k zc~~i5d#H+2ZY6IZft2e{lR%>03B`ef9nL zqtv0bXG(;K=!OKpQ~jZ2Hj6H&PH4kevj!mr7SPW@@I5Fj!mVtm!B2ND#+*ybB<3LR zf_dG*?0Mpg3?y$8-R>6E*59+2W%he5LiU?^+~DreIAv#UkLrHAHJBP__>L=1A*m9m z_|A}R8_#ZB4XfI1lzg5{fWA|Uiy^@Q#dLsd*sjT#c^6+ z@D~*JEve3a!#Vc~I{Ut%_fd#v+y@C&RJ7yP!2X47qn#4OQyzljI9$L;L)Oryf)Eu-YjE`r?>R74N)N`|-to1Q7h;Xol1(#{yCqSu6#V3aN3tPj*`WCDw zc#_9}><))s!eYKcu}w#h-xIvi%J*kb@s*Kd^|TQ-_zrw_eHPb;#UrF0R7Z42B7{4b zZ*!0nteJTEH8jD!GsXIjsUsYV3V^LIp_I=#1hEctkV>(s<_|06;~ffJb%t$|U)Dve z6^mCYRhx?F&v|vni8!ogmk;5$vSGd3GIy({kQ^~c{1ljZeH0p^H5SRDV9qx`@$UtJSKIkjPO;NwGz?CW7X{e@)FCld| z5rk>$SRGb8&pz?Txtm8Fg{Ft>{|8$@q`$aT+;?=n?iD|B#Qp6OJHFjoD)l!@Oo7c{ zunvvwJ<5WF51T7T<1O>!RSKlGYK`WGQC>0!w>JmkeH1C~UkVau;)ZUC-D%@pJKI`( z&lWKXu#{9~B+%zj9oxII{VXioYI{m2KUkx6Y>ilFd>czlAEnqwa(dKwwd@sFIbuR9 zM&5F~bc1;zh-qT0WM5Y~rB}hC-&+?+51xHAH~PaEEtH~I{6=FlIa>vEIYi^vK{6FJ zc1;T&y*rb2%zmm=Fv`Y7R~uf2QMzw86?h^YfV-e5HiT!m5G0AcxAw%lEl^Jv4&C5h*rt&4NUyLT@w2Q 
z)&S{8QVM@@go>e$U-_L}JPjP0EK`IqELy#Cj4JNn5@Xx2)fh296<+$b;pc4{n`c69 z)h_Gg@3nBK)yoYMu7}zDd+mPMZnN1Iy@$&>U8vNrZ7Z=yUHTon@6qB%F)Rk1SvcA+ z_G7pQDkjn&X6r}!=}%ht9a_COiL|Q&R=PqKx7Q`HKW0kyhjzZWMVPt-Q`_EF?a6;w zbMJsH?g+`YD-&iri|^Ad4B%r;*tO3Ce(fm!DX{e>T?c1{c|6D7!dYj0k<0=NUCcUW zO(&KjvZB9$R26)!SoReHr5V?OtU72_AK_ND3mQi_3t&c`uBVp`&wr1igjM4-;Fi5< z&C^S!p2<3noW7(w=f8yq^MA{-3jaAg*qZ*o!h<8A73K+n)?f&(5HlnUIM5X3qtB=+ zP=QXCNxeCcC;vYS{ssy_(YOLLRa6zC)=2|YW@<12PW5Jx*U(@UqJcsN3!o5yXA7cv z;0qF0O}Ds+9B)wt58R4@@3P3L>#+ zvP|BRae0jK1tI?ytqWWRE?lYj#RjOHtyP*9lx$xVB$SuoKEU5*ce(*LjN61Hf_01A zrP)JAE|Xel)zIM>Ff&M;RH^F`&Ih4ZhrXsE0qqWL0r3%(t0C5At6tWLf1~H@)iN7~ zat0#gu`(DI^OYc-tx|FUG^c^Nx((F8BH39cbw`&&$KR>>cwn-6jQ%LxAn^lqA9xr< z52@5JO7r5S;JsL37_PB`ru7oXnKS5^Ab~;SbZ9&`#To+;Tt2Ak*yB9YD?(+&VKhm$ zSIOg}FvV1a3Q2F*0%!>XE!jB=hu#IX1Ox|j=$Ob^o_TS$W8;>j?~^=Fg-iXPH8 z#WRf3_%jOBmfgUXm*A0#Zo8hvioH>)_85qppW!)oH|jB7_%>?Q=w;Mr%c?AGJiBwW z?Zq#fK{Q2{#2uPlds}vA;9Wz(Mc-!5a>@Bz56V<&74OJ)U)KB;^CD1jc9&-Vmtn(x z%y5Y?QN+n=cYj#CV)6@3aXGD5{0^&veh&lUpor2%(2#dgAgw6msS|4AjhYlRyEL)^ zFj)GF($;&T?bM)Lgd16o!B{qJ-Aj&v4tm98C##i4e;YRcDX{l9qCYQ}NeF#t@UIzS zgCkYQ;@}Yc?Z+D>aUiMRS%B{^b-(->EcnsdNJFumDi-1DAbYE?EE4GW=b>LVO6(&8 zjMFG*;-Hxt9mKX%p-lUu<^mwhOieteNNh@tu6vyAtvg*-abqhTnfo5?pe)6R* zo>k0hyIMnnKjPDts=>vbIlNO7Z`I5LqGl=&-=(>SSNW+NQo1=-CGp1h)5Qg*g#&KV zNrtb5R+xyX!!@xVG)d8c2QHpb zXtWg%s%(6lH5Ow4EnWy(#A^)FMKz;p;maI9?YCG$Hk7kpix&ePf7m986M>o~0BOG# zzf6>R)}hf`rIy2laA`&M>Y5!ZJ4P`^k=vn(vQ4T(t5Y9sgxr`Si}wsDK>;PW!Knar zo+8d~!|liDXG*|K@6_-d9CA>l&}ePi^&A5VxjClYWdLFmDeHqROcsfKf2lovKy-y z_@}yuKYrmbVYJwd7)D0X2CD{GCRlBa#PH_rjVur5#l(l2Re9tED$A5K-W4OduC}L^D93Q3O z=ZtS}(KZanzOZ~s;oq-|dsNA8D)vWGPjpqMyHpRN6D>(B)vcPVxlD+CnZ$RG$o+z_zg8WcRvkP;q{t@mC!mR@CGa~qaNOE zS+Eu3gWKJSE&M>69qg0a>T5c$cZpx1kB(Led87P5+bdr+L_fLvzoS`wdDh+KW#7?) 
zu)9lQy<_c7w3qLC35Nb`PZDn!R)1c6S9Aa9iocAK>=!zr*_@uLWE9`j!F&r8T(h_F zpqBjv)9VkeRXenuAL@$_yK%4W8VUM2CjZ$s;#P8ieXk2rw^2OZ%IjN;E4PxxGK|Ty zYkXWyOx+5GuVOcfHk;6G6_p3xPBk`4Agrj!=CCa_6&8%7r7vK~EKn426G-XIzK-pm zs(ethh>hwiDSCL(7eJ&DhZgFm7>DAyD+7kk-YMO1mHC; z2WoY}jKk}q%yRNo45qX=v8lO%kY>U%h#@K?3DN zr$b>zPb(ZoZ!w2}BqQ|@uTXp_Zj#1UH}2Uhuv>nMMh{OWCsc6;t1$(_-=aZEW-yJ3 z62VgSqFuALR(FgtupJrbG0u|?%XT+9!N))NGHBlgS(DRDePKAowC{a}^D2*7X zlVg{NP^W4MuQdGZ2!Noj+X?yzZSHc|U!Uqha+hW~pTMvr2MGB#o>Qzu86St`{y4X# z^S>CB4z~wYe|ydZH2KZSp9nh$pfyvg0rSGyYLJrY_c>I-kBmw=q^DSBUj&-vM`(Tl z74-~tO-iw&=tR}Ujl3?yMD35#TQxsfSe2(J*Gfj!;N=sjK#^3(AhON|E2g*GJ>l!s zT0OGM%`4xM zMVp4MIYguKgBBy~X{{$aSY-6)7IOiHbCn>vmpC%t-U6kdkKQrJny{T#jm%I#+TIdV z3}}s1A;RPT{oyIv{lVxocJeNV;j_gjAcnoQof zY#eAT&fdhZnrw)JGzsbu;~`!MJzQj7Uk9AY|c5GJu!WU}T_Ax2hHmbe5(Z~qvj?l!4HrYP5ojs#8G8hOr z!!YHmxa3j9_8!o7=O{sbGg;g_*1Ym=?#JC+985XLrAY;0`0fTfl9P`h+RgC*Kcr@Y z>`2>e#6L3lj~qYJFbJMeMz$Bi7?=w)1OOMuyE~M2{6rhjX)K;0Chyu|8X2Zp@x=#Z4G*S37Kh$#lIhZ(iK$W#^h=8&wv!|H}|R4WQ~%g>1OL$o#L&Pr7aK7ujy)=PB-hD?T12 zv77bEul^3kGfXA|?^k-Pp7^j1FM@oHkFv#wTj78noBzS~(?bN?$+Uj}keg8BL#`kG z!4ASFEHlWz(1m+)mk`2sPi}WFsfn-99G|a{$1`bu#Sp*5^WZhDmMv^QqVkU%0pl-- zFR7VmIMd=?8h(`6-5MsC^Hsyk$nvXR0Lck7lq(hSp&{G0PRS2NkGoptFhMV_*$=4w zN%%|VVm{CkfV+KMBOIf8$tw`B$8h0pgKLEMsr=3kF%IJootz+~g-w61I77@BK8^i5$lp3}_X!!XtwwN%6 zuCT`7mqT;CY>JO{SWUFq%AZy07^iHJ+-Pp2@>{nNJCDIQY$1_QSu4c_xQib-WjYI~g{R4=GtEGd$sw#Lmb^aI0P1OPMyDKOp7{5CM%w&V7h{`CG zSUIi$Qj~v%EuyP`iwqY3_Pv_&e@6zB{}dTano$)QESuA%mZ+-qS>;-7L8=4eGOC-|Y|8I-~#c`$~!>dk1e9T+W%VD*#QBEqOAn7A+x5`u?~(qN9^+?znhQ^*@i*VJWMh_ zq#(mGJX;bH*%6Ioi82%xZ&B3^fL;Yh@mrXoM*x?o#if|s;lTcfRhk3asvhA{u?T1u zZjhjKoYC4qoW}}`VQ$4#uPR%F2Y(jkJZwQgRh38WQvYI5~Lk)TkZi zE-_lC#(%2Up;kyd#^ku4OsgCpROF#q4#l5?laYTT-fZDD7;@4nl7=z zqJ${6 zgcg{TxbN@~^y4WOrgie1=@&~Y)4RBZmgPA#Se8m+G&Dqcutv4X(&loAT$sxDsT5K{ z?}KT);!c}iD%Z1vs-@k()?mLjVN3p6t7nqVn$SJ(J3;oK)f#|O%tm?>^P;8bB}Zbv z(J0*UjLC0@iLckwH)KM*Uum$oeOTg^ZQ97rRw!^wpF|1D9b77HgO(5q6IQzTJBf9n 
zk@*@Pqly=55W8~}FQ<6qCRWexv)DqpY67Ig%&#XxBMTZ0acJ_u3h1i=v>C0E=`9V# z&0B1$DaE|x*LW*LZXgF|sm}L?E?8j#k8sJR>EX4ayTm#u$JGv%C=$m}`JOo@dlPK* zrpyo0#ClxAAk{$ZOuqhiiz@P0gJG(S=0himMItrXVEO+M2m6_24yKj)^G%vNzh7&-G4B9q-( zM5IVTliZ;%QOL%*+TrK7ieGd{qT(~mxHT={6xq7NW}v6|PVOZSz}Ok95?@>0<*o+c zTkMW8oSQghU(&o@=eru>E-s1db%UOR!cx^td4s${Z&!seisMzP^G~CEEX>TO713)+ z>?>7#?gpK2>ZTj37w0I{@D0Wf!;tB<>e=^gdn_Ptd`)!V#muX!>tl*OZ+g+s7=O-W ziAgmFl46p~x3%yPv~1I?w!IiLn;tx_o*fP4mrT%uyzcq-*Jf@rr8ia_Z7EyjupKJD{ z_j~-0U82pG#44Ev;AhNBFOzAlE^Z$Z@IKG|j;M(6zw6jNAH%QL#dU!c6d&u=HU+sc zf?GsO9YYB=E`PI?Ff>Sx=H~b`N{_UmWhC*SDp}uP1UHBH?ikfCzS|L`T~wA<(|Mau z)2U8Y*HWo=T_BcqOI(Av+ec^f6xxNdsd)7>i6$I^zKYN_c+rjKZRk6U=@4QUl$$nt zEq+LyGO~jB99T5X+cmmNx()V#$KH}1>@aV15MgdYZ;=I=lsCXwf5CKD_F#O0^2%#5 z%YMpy%u6w`l^URE%ac@*jgjp&hOvR%R%x?2r?#b>`o$n4fK?UsBiB@NbCokS;-R9b z-wh+aEZU+3EaEG`(*#h{%q@9XBL~pB!4bZqP#`(Ks;!X_;`C+X7qESLXZ$N>{kz*L z{O7OGe8Kbo$9DloI4f<2l^3as^0`%#`*dmdFqU^+Dh*^+S{CGgZv;(OanCYopa3?t zWUo@_thXR_3CSF$@|0y#Pf2AQuiOc6=XQHucW#S1%U?yds?N=S0%kx%Y!9(L)SxqWX**#hN@qzhwxk_Ta)DRSu=!b ziFpz#vb~Ukb30~Da_yX>P?C*ZK~8O~>Y`&(jPhuqj7KXHdFnnol*a9lXnR90+^#u$ zgHoqJ9Cd2q4$O~iBr>&)>V>8bMopmqcabT>jQ_}>*w|25&Zc6L&sN!D;|PB#=r_fM6|5+7%8m@W8R}H~*+0GR5cW29;%JF!X zC5a(GUkbqI=i+52F8S$z_d&7P;FLcbq~od`Heo#eTo*rplKCDP``sGiimm?cd^H5G*#laDgJOs7c}!P7b{;%`1PY2hq9E4sXj< zK2`CN!J8I2X@a;;m%u@EqVRd8o?dEJ!M5#G{+C*i9H})LB~Fn(2Sl7fmJGf6t7@bw z_${mssHYgleYO_@`g2;5+DRc#Fw%97#J|)70Etz**LI(@{1hay$)SnyUxo_{)@+e& z&$drgs}_Ez5TCcm;%40(pM}j+xR>@&kVU*_P_eBWbuXg4t9|u)?Az4+;XOA0aVt5^ zu7()S!W*#1>6SP_mBsypzW3NZhAd=%fSGIJegD40CEZ;IqSz;>eHp;e{SL!zpr@g3 zvgM#TQ=AL7+pQIEB7Y3viXiiwyUms=`=Me8KRHaVw?{&hkdB zUMRY?L8_X-6Mk_S5O%~XvB= zh}$*3W6e)*x9!_(0-tv1#0S(WDwFK3v&-b}QqkS}EkQ}rQ|bLA_d<@~WxukQ2vA+pA1e-}s{G68CkFkhrj#I^Cxs{D5^84HQ36@VuoMWN6vnVmWoZRn+ z*%Y5-%ca`o25^mhT976X-R}MElp6|jD=4>C%^Ei6D&n|Xl3E~-gD_c&v zdf1|XwD_`H5}O%b;LXA-Zp9SXnlkS!s`R9L#d1m9>q~5kO#Y!wI=|cy&tqbZZ>2s0 zmXUa*43hNl%Bz>WcpNG1z4=(_j@ zZsA6TOD2%7Q^rWV)2?D;=eJTKsfflxe|2aUw)wfE{rCu5oI9F^`yvu~K{Q(>@`LtV 
zM0x5yrXl-r3!zc;QjH(LL=0F@zNA2Q-RAvm;v^N;_2&#!{kY|27u5U+f;j(b3wu^^ z${MAeiUnh(ks?3gr#c_!iJNHsU~b|ONtX{F9PmH2aae4riCvCY`5~IHJ1~f;t|=x) zdc5j~=l8H%wW^BgzTNy3cU;&!8Fuz)t0#MAEPbyj>#~D*j$+oH87s!9ZQ=#WgoN?d zX-nczjRMH#{gla`1Ek!4WU^P7GxbaW5xD3~@;g<*z3oSwShJ{r)fZcUsH69(%27r6 z>1f=3#ggjq71xa?Ki7)NQ8n}Fk)YtyNM^)bTg@*nnm)C}ROUm<5Pe_9XcXgc)TN-MOYv~)D4#5Q zLmtRz0e?D#<1q2wz;QWfvvP`1Sb)BnRJyr1;e7d`LyCizt;E)@X39uvRFAl6R%ugoH^nMh z+-^&dCwLC3#eAskB~Xx)=!4H8nbM6Mcmc5qTbo77NIneZh$h1ZJIuBr6=;XcWlTpv zFh+@4@n7;bW$l{mW1Vme^u}-0$v&?~&sHbdk9nXlzf2)Q!DoGW4(x;=@xq&Qu^mj% zd|S&)pEvzzk!+t|W2TFwT{IBh|3wqjOWUh$4Yq-b9%+fOMYJM03~c*lV9fDL|} z>IRhOj~T$6>&488ctiQUcO1EG;GSz#!LRO!%ua| ze>38Wq=f_M{plD1Z&zmebs)~$HU3hKm8~!Z8X*p7*Th=}?)_Ak(dy41JJqy|E;qI$ z^+%6bm{)$eixE&>l(V(!=SNc71Us@Sc+p%Xa}w$>PC+*PdGmCazu)3V50GxHOltRj zRh-`n^xM_2=#nnPcWO2oacX)Y_=NI(>+Lu+{$Y*(u(k5j{Jet}9mFrTa?HJP3P`}} ziN`6m1B4&@^qD<&tlFPM?KXcyru3XUIbL0|9$hHJEw(I9X~X%FD;$0grt_0Z5T32r zPK)LNF#Pyfm-IGin-l>y=tDaS0K9YM49Khc)s?{Oo zJhS==ukIxnn_mJ}(wItd*1qiUvIi}49*Pqz%l%xqe{o3iiH*A5$j7SuTOEG3P-aJP z7An->Wn)$H>4}|M&7S;o;e~hyvn^iGgm_TI=rI(c*`u#CA38+cj)^@xm(G zlTGTR2w^%`Av$@p6T1I_3h`(nO_aqK05&KaHdbw9=bB!67(}10vxSub3ag7UFT-<9 zc0$)k13$m!hr1i(T*0=>ynK(5#EN8b%ScO*->J2V|0Z{9`ajIw9X8)6t%ey?`lL$21k!z`J0QTZ?e66e{Wt64T!lHkDnF%5q>3b-X|sc(>2kk(L9w5- z>_^oGZT^Dsc`ybZfxs#z%Y1*GzQMu%N6<xIHTNx9Y||x)pLJUU zIc?vqi8cY;YO`yv_VCwC@rSJ?_6MI94wWAm+}(!0u`UjyVkilO`NA3>%SEEVUbx2K zZ`Q;q^u5H%HL%y@8o#4Tz6-KPyUa#P$#Mrrm!nL^HdQwupP*%F7XRH{8* zGyU{XAtDzR@Q)3CRi~c=8Q(YTM!BHPOD;0~az_)gk>Y%Lna<(=a6pYL(<49BUIaev zR9Ru9vHG;kjO(^|mnbuxV2T#|RZ+k8cSo|JX43LLu+fSUQlih!jO3|bTrC-_LV-_J z_|*Gv^*_^ptEV3SbN$aRdGsF>r*d+}(C7O`>L}6I^C*>Si}#6E;Pn>a(vdZR%YYQi zq#m>lX30`%D37*yW&8vR8n!DK+#Fvt>04K@Nn7$WWSBm99kPK|DEYxSCPyBX;cRY@ zjDe!LTJA|GcL`H6+$dR*f*o_(hs=yca&NK4BpXSthK)8vZXtbqp5l^zd713RljV9c zvLG;}yd=tlq0$OTW#bf?JY{QjFv6bh(n`x)ljQz9J%IuQ&nR@`0mL8OUr1jo#66be zt-MS&4=9=ueZsIOT}PFH1mtOV4Dg{E22~PJyp}X1jwE7A6n!u$*5NUZSFMFq8|z?= zY9IzMGhI^7VUudlk1Syf%Dr{7zknS}$#~c4Xjnnzn8IjeRKa9QV1 
zg0p_5u{Y)ZkV-b)t772w=Gn-?L#|Ox(7`MkQYeCHIZI%suh+#S(sJvoYF0u683T?; zx^9KZ87_r11}#$Z!gNX&aJEwE;~M-sT5p&`=DrE@a;^#G?1ytGUP1|PA?(d$iS9>- z*y3Q#?u#Pu6s?=0B%LsVg%klo^Y!|ONANcq8bXt9#m`PqVHG;VayPo3uT=O_0#_5( zbR%k-d`VS%sR*lV&3motGaoH5Q|6c$Pf@WJ17ecFTQs($s$3XA&4WqKGNEFpGx}ci zQ#d2@qd={rdl{7=S_oYrQ{14QJw=37Ljwd*Q(F9%E~C%EiZPD2~Jtk;8dZjJow6b#IkRcUYHHvp{s zX$6w9LGkW{Ru{}SMV0HcXe6#nrsPJ-G``h#n~GRvKojjvhUeFX2kI5KkOQJxD&pK` zd)Y0T4@wWxF;q12Hk;3@`7s`BL2JclvFVPr#7bD-*9ox4Qpu&?H;H;O&!mUlhk3;f z&?Jg32@MyCZ_)VmOl;RC+aELg0kx6r!}tSgklm_LK*OT!W?EOew=UU=PWOPj$8TKK zq1i|;EtB}xBayyaHH#Kok=KOQ#fs}$wT;;O9VqZql`XGMG=&4<&(~7_l!(khX zLTCvt9;>=vf-pw4Lwl*1LmMUWNrxfnAVqml^DADmq%}al0i(d+f zeMeA+P@fePU$WMEbfIazKm}4RF!`6vk3C+cL!ja8Ha@Y+ITz$P7`|JFM z7V$2I_{r_)>R53X$>J|$YHX;wy6*GOMv3_@&WV1a{I}ILiFa!C@^C?n)4W~qnVJtf z3(rrFby7P&pj$kx{A1N~Hipw;gHt`({syhitKFu#A-ZCgEgtCtAb#ZVJ37P>9g_I1 z&7AiPFYC4Z_{|#oqy_d>e*r(9=bPH9j{?^DE69h6UAh2qfIoorwE zq2imXJt;+syi%0EPURA)@_PN*D$$C3TZ>1?t6KLr>&brwsqo*H_?7<*QlGVi<$nUH zGR>-tsSH!SB_GL{DwF?PO(CbyT}kB>O27`7gN@)6#WpBUa42k2N;ZBR$k7~xl;2$$ zqqUw{%fGFG>TP2f|i%+UPfIJjIdRyI~0s1fseIB9MN~L7Z3|bzx zK7o@|)$|yn4x|}ncAub9fQ{@waz|m74W^*}-V9t-tuI^%UJDgcTWq-coM2Qeln0XL z`fBBVxCD(GsrK&EKSZf`9b!$uz8CeR8=;TUu%F>(rfk*trBj8 z67ymx!U~BLt1m1-x#=8*OS+Bwq7N;Git7U+v5Vb^01y~Qez`%1sbbMKKVJ25k3oFA zT5WrHEIwCZRhXt~z%~5x*IKEGRhCJ#j0NE?bimutb5Is5YeBk;VfN&j?rx1IgE$*@ zs?48xrWeOGv5TSD52`GMV!ynq1R?g8s7wZWdz3A15agk&=L;13A+;~bp+yUzVDX90 zZrAA5kG8!8WI^i}atu@_uetj(zlara9t8*=v3Qs5m!q{RU5!%SW@@y8?IFsqLWhmC zP+^S#8eS4>aO=f;4u`leS~2+-Q4Vrn+Wd?P0C1Bc6^!cDQE}ecDy<)*3cC(w3kaJs z>w@ey3L?H(^Kk7TI?rr0KuTV@yjrzTFEQcinv_(3D0Ivxx;@J#ly1|ist(yz27A$T zo>sbX3Ncxh++7VShFM$CN^2CG)kh}HMtLK1J8VB4HmWc5P-Njo=4HRu#3NQ!ZwJWM z>yj;G!UjLD6+{vrr}9r2KTBa#9Sqh+`HC&Rh*>$`h%-TE^Ay_WQP*8fZY7Mx$+Au@b4@S(kt2pFgSkp^s%@=$Fv|Ed zrdT912Xq)OmnfLXW~)?YWE+*YR~1A8ym-5|i{Z{!#J4)2;8kbJNEMP%wjbW11z}vp z&nqO)o>h9|^x{rJYCwUPJ!gu=Ef7_$#!gW)?6hm&7L*fSkLN2kt6x>>0)A* z_o-DHBdja3cy6~P`ydV0abJw1#aIjD*|A5Wrg59itqw7-CfmQ#NbY~L?v>T&7}-f} zWC+%a4vzN(c4VxQiZKWh0YNH1U 
z_Z9rRb`@qQ9%`#aDSu0T=PR9 z?NZ5TD$}>dY4SNOTPmqj%mpWr?2}zbr--`Pl2QU_d>q%-zus9b4Ju>BKi*) zg=Z+_T`FhBw5kB7_($STR4uob_iYx5BDhZgr0X=xBJXp{N zJ8>`-(aV}kX!SOIoJ5?Z5RF|d)Atw$qhbkerD}@CDa0E=x=yN$67U$gl3s(}{)0MH z08fm*ESWYEB!Pw`e}6Z2_As3X%W|UJo71NQhm^jADmqXW#3Wf7qVOgxU7=A49Pu5E z766TGPL>CgE!pa}6DbrTZV4~c*%Ez-Qr@j@stqNB!(~0lu8^(1G@;s9&G|}@BqQr~ z`qFg1LN;uABGpQ^yD7alebXSDHb~nwvnOfSNUTnV=`>HVZ_ygbdfiLUQ78e+UZ;~2 zE+LB-R(#YBxk%*`+O?j9JTY?=@f*$ROVTCP3eD=%AA9RnuDh2(r(7z73~tpd=Uyu) z&sN$*^(B#8G~Nm!e6~_gmg}S;R!~taKLpF*k%3cr3w}l;xT2g(E2!v2FOvOeo*Ce8 z-18#YJqYCX8;z2oz^@MF9neXy)9d&{o_GitkDORJt*9eMR=Luoo8%{`woERkZc31z z?9c+XdU7dc#y866%z-R!gE9k0Ri#rJ*>cHU4vn`jtE0`qz6IF8S!)%+aioX~#YzS)=9deGsuQEWHR03j{3RpSkD3m+< z9IjM8MCF38ix=OZi}#!=Z;bLSAdC6QmYglRVI~`$rl)>>*4W}6iTHK)X3u;uY zuL|2$yHIfica2lenhJGQxT_N+`|7PhOdo4$!i?bHkgr3ePLyd|XYSgO*}jtSYKp;4 z0T9;%Kp3{~1)c#cQz}uuP`2KXaR|jrhYc|UY?MxtE$1H9i#prI!>+_;=(Hd^#NZ1s z7F&aIJuVuC+qQYBgl=3=6NlQ=Kweu~s?s!RsNwN_X!Lckz!w^#qn#Q+*uE-uYk{P{ zR=4j_`s^JnZq?<42ntY7vV=%JkYp~V^LR!TNyN-(ZBugo0t-HKY@bhYX zx-O1aZD*n3L2A<)efu~SW2+JG!@T4X*Dnsy0{p?1F|7Dijvqf?1KHDOHMUkcZ22~z zbrC64s%RGHDubaXi&-28fPlRSduRZQd>hri6;l&URYBr5nvQ+19+ zsf};-QqbG#k!8F^A`dsZ_>r8Pa?hAXPgu56F*84!Z?e1EDf~26r7Ur(v=wgF!5PkG z=D|YnF*e8=w7ZKgPD{>c$u7IQo%|q&YkVbr1)<8ktQKj|S{oRF1uh#$idCD?$!Bpt zd)EjcqO3;xDHFc{pnupRPPMF_bmvZYe_iZ`F`0ijiook-(@Um|z%~RKmG(&AHMrC* zb~j`(j5EY<%luB55#6>p&9xh`rF+qxn%G{I*q<$`nT8#_=xCc(vxyyR)Zf%Vi@cs! 
z>f!rUxH|eJq~QHjB1NsCx)KnVVd zp%>N~{2RJh)hdhMYPPdL;lB|a{CPjO`ok2PXIkRXE=jx%B#ghI^C>kkfzlWj8x$%- ziYNg~ZNt zDb)H4-91wCb6PU~O^TKN0ye1Oi02iFec6Hz??H>(w8)G)QnTrID$2v}*2EfG6!NIb zuNWgHZ&#Hus7>zS^E~kjU1BHN_T5?|d##N>VEfVgnA;3m<8Hw(ZWHSz*}kh<^>eon zmM+miFK%UISZ+&k3k*!Lqp5?O!YrKAT9MOr|ER`!F=dZ00s2%aP*u`XRUV20bQiOZ z?N@B6Xt3-bH9bf2#V5?KzMfo_qnc3_0quQiN#$$;f1U9~$ILB>=}*&Hu;;&i8aYe< z|H-NKe|w<*IR^?)5mB9k!MLgcHA5Q7nvSO z03uil1v*XYEo0b#ln?}-8px~t@&0?W=8S4Z8Z9YaC{qB9zNCznzO)P#PG7|Xv(}?Y z@<Z`ugTte$n;%~HxN)jgY?Pd(PloGO`nW;?Yvf}vj-;q-wXfRA`sm4;|7p;?O*w^Jk9qqQ*% zz@O}|d&vgMwa3XWqf`~jJc%|`rTPN$Aa$p(2jU+a6v(l!HpuXxV)8gP*!~nO zRD(YGx-MI5$xLsgr&wM_z95a9NRTBXvAyb(r*SJSnc7PA^QRQ@WTCyvv^fMk1jLzQ z)M=^lLi*C~LYZeKWtb)RQW@^r0+WKP(Oe1GC&W~#GQ5zHTqBPluEcbnf3PA<=W{8n!f$7C3ssXx8sv;nV;|!GEy-BJbBvl&G zt2Itpd{4N{s_M;=;r@_H-vzH12e7K*$Ah$fu{B7h7#3_saw&zkYV~4*ObhPQ2UYqu zlb^r>NJFLixCffRHh!^LXO~tfP-rL?R`JG>RQ3l|{;osI!~~UV%C@yAQ<*Briia$0 z!=C@8sA3*h8f>y$TioJju~Mz5UrpHi7lYrX1?fDcT8`@6uU564vM!r#Z`t#dzAU|o z@$Fquu2RRibA~H-#V2|Y?W!w|*-PQZiFx&$iLnzgeg9 zDYS~9yEVfUZqh-+gU6e`Tp~f zHl-Q{8P!ahYM28dnBHx0Km7%(M;)3yR~cz4O)AzYf_AG>eyxQJ^vQC4C@eLd^FR%f zeROlju*^r>Zd>&cr*t$SALT!W6J)_VxYbm~hh+%^CwoH{yBMxl1B$#>&)U=AA$k5p{k<%Wv5jH2^TD^wOJEH9V&4c+85D+otQDzKWHHsj?Ae!zUbl^nwJPHIy3Swk^urfx_C(_NMmf*q`!FB&9b4l#RmDeF z9Mdj|2QY+7w}bm#v{vnyuFCWGZVR8m#H*c>xJ9#STUlN*ll%EnN-e|Q=tM|^4xiTh zl4uxXRfRQoG1wod9t4?nJze4ks%@~GIYSB>{QO{z%(`r44F=hKQ+&HaA}7fBR1Hp-iPQt6M1!1W4@#2NK@}oc$Na9EihHRQJ&~ay|y-BPvzp0pLBo$g zuJ**|^pK(}KNSIeJk2@D;^*{G$itpzI-viPRDMUbm_jUU5H-bNKBsMjnkHOam5Q2h z(HHT%l(R(j4yrm(RS9(R6jiRJD)2Uv#A=g&!8Y+ zM^#4ZAAYL#cW3-7HO2qtZ%T^)MNRR4rlu&bN*N5~>5M8trVD_&sQwUpH=Jj!^6!pG6zqrp7iC=DeLPUh}eDep2Y575RSoxPXx&|!a2 zxq$(y>^T@yepN;*r(u=gjBcV()x~hoa`qDdJ@qa`EX1O$^{e2$N(q`C*mLFGoNLY81Ldu<~(2s(gs`cVQ)rLZeAD{yL z3gvFqp`@#il|HbgU=>w(Jwdhl5(-{c1sx3Ns|^yx#1_c|B~?+VqMj$JY@RYwXEaBL zq-c+!()tntS1QC_9;($#;MyYDc^2B~6MZm^JG3h6dxKP)t-9|SentyBU|qtN-PLHN z5X?ZFY^_oi%&78hP`Y=twMrjy_VcaP-ZC7ek=P(xn>;dn 
z!8wUMfm&3GI}~}LZ1S070Z{r8F-OZj&mq(43I2?|(?r1NDloa5sWB&bQ-(9Ft zwYH&Ca|RhyY*`iCe@txA)!th==4g#s-k&fkPZs)HK3BHToG1&2Xux_DOSjTUec|7N zMAf%sSB2RMF^2wvBAeUOavy;o!jw(yDAOF95e&{YIK*dq-2>gsl~Wi$htP_h)SdyE zE*62)>RSi%D(V86w}oI{^#p1^n5rzoT}@$m^%pZ*v&>Nn-yh+SK8!W9%FW|=~l&o{J;M)*&e zb~CiQbpRs?Zl_jJvCEiG_F4GSckbFFUL z8zLvFKj#50iE*Fe&cL@pG90ecJU5JeZ3TJznJhzIgpzl3RUYw@)-EZ~s|d)DemQea z0n(igq~!dYjFW0)8E({Z_F&_lCjeA1Ri=MAZnlILRWQcx_ZxcQMBVjZEc4HwfV4$d4anI+}H80_MKxo z9~_sh-LQ;pQzYT5hI3EZl-KDXSA9kIz3MHTR?bH5_W3^kgAxP$))qe|{voe!*#1F_ z20tyl_^a6L1r~%jTPD*ol`bnlTIqZ*VEm$2uWbe#oyzHL`?>{1WIvi#v}pGotN5E( z#khga$S@T!t0rP%2UC*sWcbWhRl}k|mN=#=bRXEbx=i2p`$wa32iNyf1(IU29a(bO zB7Ir8lh*1Pf`TkXpg1$AD(l3b%)gMT%8#<1dLL=g@*e~f<2ijaKNIB@p1)_<5{wF! zz4$7(I#o~_1~yLx42_yUO`u7ChO%5$Qa*BxVbDq7YFi8Z%a(BM?EpIfnh!VWlf~&T zDT=R}_nVS0DDCHygt{?2-%Cd=%wWCUgTw*fIO68CRW3ATRDfu$0f3vA9;K8FND8ry zc(g;J10z9+!kA7_S?jwSz{|5~uZ&2{!Js*J`;YWPJiXfe`!-t%dk+Rsi|0{V%rzEm zhqvazt>l(1!6r{N)qHwnIu5xexA9-zULhNbXP+%_DBj`0 zV6oJ4vhj|q%aX*?rC5|2z}``&nOAa)v^ss&tX)32l~5Iq$7>F=JODl)`P(eRxqlB% z@)Tgn=n*+n%DxY)Nz>< zLcTG_JggG1?96rpd{Y>Nd{qN@$Wq>o|3>V8E(=QI?0#RY--3g|87=D)Dh}f`)~@P> z%-uBZLeUB6b@WVPt5@ql&^c zOP+ml5_SscP8$kW@b=ZVxrL_cFc9%&4(5L?+8B4-Q=*oI{gr{vhG-peO- zfPjtVN95R$Q>2x292Ja=xiBz4nis9huo)+*oyGZF=a*k!bO#rV1suc(!~SSEDtBir zm~sx!IC@#T5tDeEWHkoHaXY%=zCFa0=ya+zTxsh$kGo)%FgAN`3 z$tk<>%d9aG{}6XEf@siOzgs*EJ&TJuz(7H7PCKl;;XQ=2j=v8*z7sv1mU5u{6IxtG z{hJV-F$d>4_B&CuXv=k2)?Eo+6SiupE!!; zZx?zq>;6VEYjQXC4cZ%|Iv949O>i$_8*_wvX%|TW>=}1%U%5tOsObq6t}#C^B7ZnU zr|r?6!@QmumI!NB@|+C_e@~sJhjwElPR-#CH(jAq-4l z+-FSRQg4X(Sx!t|9&~}s4@p@#0>(FW`0g>rF4{-s6>nF;fF79B#4O+|9mdrN>-K!5 zC5A0xT3-iz=9f2U%E;bH=I7hd8iO~@T<~i{raQiBF~>hw{hT8i4{BhC?ZGncTUU|J zo4XbaP_7ReMjxI-^I?wAj@a~-1yK;-a`2WelDTG^3+BZ3p58x)oh;_RC*p_OaeOMj zp^0UJCgEXS6zp&~*hjfHuO2u5hC!=>e?$3cb*^h}7vf{5$I&r;luhWoL&KgUZp$gi zULiRiSJvK%AsxonJNNw(Qh^RRFS=DXlY1@w+%gp){{{AEhwk1wcyCBWqordb@kGt{ z&_S|g!BuGHlDhaWPc3(0ZY`I_A)C&Lm|#)Q0JycsDO@ZcSP{`?i)^O5p#g;GfCPZ+ zKocTbTEvN<2-5ZPKA)pB-EidmP; 
z$~{h5`_v$tdyc?=>@N*{`NVXs(81;FJC)yEJQi@Z$wUNYLy+m~Y#yQs-2YYEe#8Q+ z6e^cOx5xwL63%XDjOD2_WcQsEhw~2s6`syO>EwPdD2e;!7=nK{+8NLJj+ZrQ>u5+w z`Qn*Mp-LB5wOXpF8#&IKe?e7Bce9MCV&E>Fe3*=+e~W0qw{_X>+aG*Ib01p$q_RyG zz_OgV6q8`|w~#mG6mA8XwS()zvqN>Kj+bsY5hQ0A|Ff_$PZddT{yi*wdBA-?qekM0 zTKHj}NhP~h5)K`q^UHlH9?%VZmRJMhH~aNdXl*h!aSZf9C$kyiL>WJVni|@8!iRQ` z1%~C4S(V{zM+8@LW(p2M7{k~G(8Otua*!19N2V#u-tjsC6`>P{z>B=)?y6kO4y5H@ z&%MP{Jy?8R=GtWoGE^9LzuED*Vnx7OF&-L^i^$>$S<61t4Am-^$iT6}iV5>ABnQ@~+Oyjwc*ujQt8`rW zGQm`CYCI4SMAS1TLZu6{m!I%eoXa}3SPWlIiT6&q41rI+7aYMqR|{1e?5BC03DTnJ z!eiZ8(J?M*YngM2WLzWdhsl^v*_qgUjVeOwQ4Vy}EaKnkV?dtVbFS|W2fpf&H71%N zSC`7N&dR;C1YRj*g92(dO>C*8Iiv*Ym644Oij8jzYJY^uAN6a{{{4Emb*)^X#2m;0 zAmQpM=931uVlXXM*Xe6C5s`hhDntzev zZB3CI_UVNQk~(aPnI}_?8T_M*-OGEF!sND6M#yYAUZl7w>m#(oc9owAg&tx7GvSLk z%$YQttG5w<{l=*}68h>rZ`K;v5*^XIR(R%t`Nb>QhKYmYDD|F_*@ZEIP~j+xnfEtt z_nr&SqAf{2R?vTj+yT?5x<&lcE z*SfdlP2jYkIo|=id>E|sN6x9G@MGw=FZOEZ9;M{SEzW;kf&cjO4XZSU`sw9rmpir( zEm6QCaKNn)dmXiwUnwgA6U3z)NqqJ<7jdmVIt)lvldU$?UBHKUp_bY2``?mYfj7g9 zQJW6Q;eOx5JT*hQb@pUgUumHc?jjDhLTnz)JKre$o-;+>y;}(SFzVBAnvrk-FK>)siv`r))u6cREfAA zDLd8Zl;Y=jb$BJK`|nTBp9ZmiLdtTGP?!)95U>z{07w}GD zW-{?KHMDjxGxsuNWn^RIU~+IYGk0ZTX8Js4ZTh+QALkhU=ehqlYVO5kXy(aiVde!? 
zhl7ALdMwm5|7`AF2oPVO9=< zl<$01CBJcXbTUw^Cmon_EwG_b!}fV*r;SM#7-N(sMt6-01622-lpsfkjwz4IuwCa!(}`v> z-h#itqkrIRLjP}B54dQH;rh%v-+xDGIe3`;msvmgUs*p-H%!qzPtr;nfq>e^IlM^2 zSxxG=h?;Tu5j6RVi~K+9BzR1Z?aO~Ilk^l`KIzZFYW?>N_`fcbv;Q>UY@MWx|GzcVz#qCpQ&GpTTRfUWNjU`jXQBEIWYR1Ew>-X_Bk{@&|4*I^X*Y#N+ z4D(HeDWb_vdb7gM?d_|1!}a&4S;c1xCz^7q=92`KOOm^w8Fq%7yOHw6v8LBs?X1VP zd;>8Bxxw!-8#`?Ym^Q@`xjNfm{X@qa+z^&S@ex%+6H9U12vHwVlWnYS)OFRcY$T{P zh&2XN+uFvk#6^V}*0W{hZ9)lshL{vMd5H0tF|v!@QR`oO4#37ZR?#o?x!Lz6o6Xs= zvQVjCp7dLxFtMekcC0DCp3bO;j!U`X@bxChK)I69*N&3HqJEt`M=bH1=q9&mE^s)h zfO|?%pe=H)^X1UOVmwe@gDui&3ADnSH3;#8V;uv~=#|sI z+;x5Z-ksGX?*6>J8%BHD=_anODIMX_CT^Z)3X<9;@p0cBaDG42?5!U$*F z2xq-vXQE|i{b6T9!)14(Ibncu*5U@CDE#4)CNDHHnE&4=_N{>CAew|pb2WyM=~Z<)>x6m z;;|c&(52I@AXW*czi`GRC<^2!36H|NJmDhJ#P33`OMMgJMxes2>S{IHftFGiJrn9d zx(1h)$sdVfS7Vp;Z;%FdY?r?n&!9v)jz9;Qr>E+6(9fA9{N2-{JgL`ovtufaS#yy!~P^OJhz$l{Dh(1TV zs@8NdT+UhfBF{8OVPkJ%{lPoDWRGyv+9XvKCt}G>jN@8H9P26GX!=?g#KC4XU5KDY zi%*$!-CM7;CwlsqZjnNmSV&x~gPwCt#%mh`v#}>EqVNVKt2y28)S)6Lm%r#9;05S^ z=$wRKetmQLvEdyyi*C+(04La3Dqd*ZWZI zY(>fHW>0d4RJ)3{77d*U#e?jkuQ0`ef|E>5YS&BF#ye-AM?G?{#8pUb^WZD!+d7jK z+eom60tkb|bURU^8(VGdz7VCDwsbYR$26Af3vbfSd{ubOXa$Q|Cl5bWVAF@UD@Zm?3`4t*kJo+&9z;`1v(zi@(c= zc_X5y!~7-wJO0i1;jw5&gwC96MO#CF!IfkPIRpCypYlZriPz3u0>(Ke zRh{-uau8a2m`q=Il)e35iE=6fN-CYY#jF+m?KZ^plr8TBq+*8+3E&lNt5#jHZ*>}Q zTGUmaet3r_8HSWUT9|E`#)DWtZt{wVK=AFf^WRdE2ei%MpuoU$Qbd*0|1`sR`dtLqD$79hca$30!MOcve-9c{1HtrYd6}gNf@=tlt57 zqzy*7)byxF%ZV^0X}daj$E-BoFv3B#uK9Bm!5o4yBG})CvJW3u*dwQI)6;w5IU_^4 zY0weqw`?yM7ENS_2{w}UCv0_SUSB*eU}s-ijT@kLtixm?9o{_=b;#39)m$8;=QE$$ z?^ZguOtuR@Tt>RXztiRnW8vmKeR<7HT8Bx*@zBQBqIQdOK8P#muC<|dwbq~SmLDC2 zm;3isM^Sit5^@#G9MEQp-RZ1nzO4N&ppmW%5Q-$=o;u3($K}f?0>zKG{OJ~$m^Yez zyl&N}m$GC#dSERF9>^{$YFLaQEi6l|^_M0ZY)FtgrksJ7ID!qZ)5fbRT|`KC*HE6| zoVF?U5$w{kWE!;&BbX7WOML z)iq~RR!$PM`VjU659qOuHIC`!1#pf~H$-@TcL~Lw;|)VoSsj+g$#9WQxc}sqJR7k0 zAXw1DA&MLmh_#G+rzgzs^oVeDV=T5}JHVzYiID*w^bA~ykf_6c1?U}rAK8lNB$Eu0 z5}X1%u*y*%Xl8_oDz2nubHrpf>Mc@C-I4`lfo;&YJYY)o50Qb_V)B|e>=%QyDJy`* 
zu-lh_6NGkk3k|b}UzO_L5d4#TF#n}_)SS_%cOu(8y|nm%Rt5lyWRCDTq~b1wWw1oz zW`Qux^X*ePHfj&ZoS~1%WnhL=EZGWKkD_$LAUA?k)5r`tUTbG~U5!T>ty^G_(c)9z z{q8)?*m^H$3UY0LpODK)RR=Qn+GlasHG%HTiudI0*0}jtJ(^2ozzQ26kxN}LbNtP7 zz_9y!=izN<_}lx6L_--qQ9pN|$vzZ^v{*W~_^eIWXf_{g*2P8fjMBB=7)aFq)ZAr^ z5^1$j@`Jr=9Ff038GDL+nH4&umsL|}+_uR@QFtBX5rELvch$%@vKG(7@v&~bMAXKCMo!?*5(7s3g8;En;){fP{ez_u)s;#a zqS=+vMNRJ>>Le9tur-9!B~TL^k?a`riyl)>&8>3)C_s>|Dxafm>D)rYJj#>n!EM#x zhC4PfNu>yuXH8X6zpysH|50$|nQ#qRbQiz@;@zY`ZV=XA^wuyw!Sc99hS#fF-h@#MJe&pfKEbLLAeK_9H9$eL3q{e-wu zf$tB{LcS43DYN?OBTs4n4U(l!@Suu%mC+;f>F!oQopDjKBG$8^n4eJaS67CYHaFrG z?N`K_OWs_*1tj0!;0oq;F>S1Q_BU8{rWyEeMs16auohiE5y13&amWp$WvNxS?HacW zQnxvGp%b@zv}R$9roK1|PAHVp6YD`6Zz>wBFoKLHQ=+TmQ7Mz?4+7!e>DLvi6>LW} z(^H70GFeaRk9iMY1e~cv(rTt?*O0Da3e+>Rk!QahzM@5*$Rhvp3}fXPf0K|R z1E>DeaG>vyVk3j!_ttdMOi_~ucGOlmU77ba)$7JPZz4BL*19~W-qQAKc~fxN2dRx-=EG~w9!8vrvf*&rK=82M=Hot7*OW9`zMp0Y;@Wb(25|o7E7_TPUnLoE+ zzn$YSCN?>Rm_&2VL%}HTjrheJ>t|^YR{X#SWKUck=l6p&mw+i?o-WYSl%dIP`8AeX zbZmy%nyan2`G|(tNNBjiHFXQ`n|Qn#zbSel;6X54C^Y!UVhVFxTa-QO36B$+z2GG_ zae7CSExDuSUewsFHFXt~;2nK31`YL0B)&iykpWm1E#x01^d8n44A?Mj)s{AA>=?~n zM6tx-8+?S#t5!?!`(90*Wn-fP>Pe4xp05!0mz@Yp^CC($kwZa*MvB#683n^rE8F5q zlAhmuL+Ug3@=rY~T2hQ;JIAc!i}1FX%iI`H*E-H@qfush=CzI^NlD)zkLHviKcsie zNTRhQ!-isVeRdChZ?is(NT9qz1yFm7SkJgXndIttTP2pU=oES?@KP&(c*~>zUNAg3+f%T>Q!#y z!s8($$o`yFXN~Na7IMY^W|V;Vg+((UJtL)$85!unrN4vRYMCi6We~I}F8bwn`}CJXYWh+iHBKr&(eHBPu5EpR{7_@m|5qgsJJl z>{-L78#lti;>&^ib+vu1TDDM5eAh z4bW_w7RuI)=-oKlQ>7Z%L+qpJh&U^yCE zN|mp+gR=T{H%!!hzZI&lmRSYsR>chzG#|E%+8=(WFvE1m239)8q`pRY?ZdLkNv$ZX zR!EatUWLhRiuJ!Bmx!E|VGp>Y%MYp`&mM4Lqcx21#i%xjI=W#hd{>k)S-PTjrWcBBN;#j`>rH>2{HM>aMLvgGG1I~gV`R&&Q?sjAI+k3#>|UtR3wekp z*RrX`E4nahlq>0L?2H2s8t}IZJY`hFj~UVqY}Z6KWjr7gcon*7?ULdTh0mwc zvzokYw;fCF-sJgpIl~0UN8CJfLFR!4(K7p^q`Dm?RZTns8+PQ~Tqn0jr7Qc8@3Gh} zq2(w%Ub*y7EFY8%S^O3eFT&4p`{|qc0yd5my^w+0D`5|5_57VlnD!2YHf6TH$sv92 zVF63)Z-hJZ>_qX*-T^28zQH>uE^&!mxz4B^{?sKXR~pEpge-zfQOYfgL3rPg#gsa> 
zPFq`iNUYfTgmfuP#?r8Kp953*WhJmb#2{o0e4AG}DH902pS%G_Vx5V+h-SQ@`DqWr z0sS}aa6-0k+~jR5!k#&I%oJQ2$H=AV)znja8f!LJ0R!!mZBO(Vm{GbWui9vxol(R+ zM0VNa+Ot70y4nqN`lv?jJ&l^GQ;@oTEZwKPSIbOHYObQvoNk?qfU4BAM-~w2`oW=^ zQp2u~@20?5%;A^vt-vdJzPf>HQ{*||>_m89#KzLpC{%dhC~w#ytsFk}IIO1goYj|m z`^Y_IDPPziV1Sq~=n~=)fz8VH>UYA*Ogvk^0%n^Mnzg_$KH8g7i4b%Z=W|6tdSw9pz()L`%HdCKyo=0!Ng?{#Js_%_4vw5h)~ ziTfD@h%p<+RVSFms1C4uMOYa&oI5HAFnQgS3!;Pav)ot^hR6`d@PHs@kG5F%3O8(}UNuwI5)cgHbqcjSY{rgIr%_I3IoBX!4;%8(I*t-IM0B#~hcd?xW`gi}V{EBBX z#9zQ-z&}(z`UOKWQZ=a@pFPHiRS)X~5_1f0*_TxxR8g#U7VNP2c$KSMhC^QA!+$nsfb6&1{Ij3={EjbKuv;NW zvWsP)7UV1Qh#~!L(`QjzHLuMJ2YK$AH>8VRYBeNlX^ot1`H1~O+%en#n*(P0HL24hs!d!BBY7E)1j&6R`4Wis+jwqkI4m5Y$B z)x?k0as2dd7n@)(V@<_$?_UtJS5-~dG`0c2GtH2{v-8!>KfZKq);x{CSheD{lv`I$ zLSylX99rAJ?-i+Qu@b-@t)BNR*6_uUGD}uz?UvH0V(e6;bjp~06B6lW zf2GLLo~6H#xR*3zqT^c5ni9r)=XT%GhoOy!a6+E#!)T?xYQBZdH~$HKOy*zLG~$VieCB@K zA;k0_VI9-OO1@5|?NVtCl!i&+Q7b{ct>9xEb7;67>gUVAIf|qqPQD0S7yN!;D69a( z&*5Dw9IG=6>Ly!amDvEQ*o+{DrnIacCDf5_SF%5tCLAW;yGTVSRQAhnG`gTGZhNmX zYAF_FTMKmF4(tgStVJZD&N6yqFMPn|-;K0}b+to@ zzL&jZT{u{AxnJB=hlUSp*<_8jdtv;V=xY^uKr^IlTn*rj(!Oj+HN@0K^fVuH@Ici; z_t&R6?fuA#zC`EBN4M6A6Fx<%B6qoC)&q3PYOEuARe&l}eBv8ZieTxSRlFHexmrQJ zMbY%=5RY=#?o9;euy&8imn?!P56GSUBIk$xP*0(nqN9u(JI7&f0qCX-hdu#e=16FA za895DZmE#cfM8^5=1QuYot}O>Jw%ReBMy;$Mq*0XcPGuse#=L&;@(%=e2D=Ec-vPZ zT+dNo0t#bea(ioP%sw-SclEerBXm`XRQ%#HADqbJdv$q zaH{yZ%1j$|QrQAO?O;8sRL3{P4P0A_n~6w}7Vkt?d8FsK$;*^`CxJNrJQO~zl{8Mc z8| zln;(h|J;@GozriRHXkC0yT8gn6Q-p$|hsrI%S_`=}vT5TKEg|D>O6$HzQ~(M3dlP)sw~NBis@vp9;B< zQS*`<;6}7Y1!6szu?6KYJnP}WMo+=5>A~FoVJwt)&L9Rt({*zhRS1nLiepWArx-{U zRJV(_fPly7F|c8Lw0K>ao_Q~gB_R7x4{@zkiCyMomyly~jR-lRL55DX5^=kSXto&! zqpl{vm^d&MdVpe*uObx4r&1??Z|-c$%!MfC&@&MIG_WA@{u5G`L?}M8!f6!JRC%@S zk43#^j!4fR?v3)9o$ipF3;&5$O-|T-cdBoL@2nEf#DE~$1>!QLBE#|Gr!f1rc3cW! 
zmPKT8c1i8>yEf-d`-PKJxa6iH0XOV{;3}i8-iXS9a^{hyzubm_4p0wl-M>?Cz&QsP z@@o>OLio_}^9!IoXvxB6Oc??B!%28LOI#s1T^#2o6Amo%Ez;YAh2OorxdS+y-_8Is zKhQ9`#eD+uyrY@WxYY2FMc7qNa4N!RZ(F!3Szom&of8EGf9n$Vftq#XqyvDL4Uu~% z@7TaZa6ZW32d-%e4NCM2BTcl6Vq>>ti=S${h?)lk*6%c*j?Hw1HFHJWLugO$J7;69 z_JQmOQjS%m9M*a9k5w?Q$4G)sU5m+bs6F*sVi_4g>O`_JSE5pEip*74wK* z;(+pG(O@IYr0?m73uyG*_S766pSmlwhjkOt6_d#FxwzUL9m#vYqT@^X2Nj7izhxv( z6T;&xG_O%t)Df8Ra%A}%;qf#7wu8Nc+Yt|ZE@jBXUei;(-nKO?JdfaKv^z1DKyY1tp^ex${?6gvft>ucYH_bSXVWUC9gU z+)ND36@O%vB(HuT7qC>btJA{B{EcbMQH`e^cMbxcr0`Pqo#J|BK5V_W>55;uZJG~1 zGoGV^m6+`{Os7ON1SA_{rWzu6o)V*L{v4WH#9#2{syV}RNTF$D)_#Jvp}r44lfGp1 zl1OTL!8(|f+@W?sXSsm|=dky=Y2}M+BnXclo}4nT&~zR5k?*qGG&>&QB)$~;aaAK- z_K&+^rLP$?NgM0@sG;EDYMkLt2?%?GQv3GD4QsG1lRL73 z!oMi%5jw|zU#5nB3jF)VQusL!$uH^Yc^~^VdI`+7vuo1x58{hcZ6S|s1~k?qwCdkY z3T35wwF(6FBPoeBvvaxHdOQnfoUA?F-?~!=1_js61X!`=``eWek9#0N5+EV!MEg^6CM~o zc4n;MDmeqH*+L_KwG34xME8ebVQl_ZyN~!Yat0|S9II^3q(iOTzSfGW)RcAmh|yjd z@cAyx>AovKb6{(}5?iVHso>W#2e^#yEpakX&K1rzPveQl_}7B8UrWyoShL8yU`{pM z$E|HNb%HK-q*CDq_nZaro-ryEYFDTO?YErei<|WrG}OUBZI2#9#QWmzGkljEHX4c? 
z&dDOLua7$r>oP#}t*t)wO?Gn>mySEkHeWFW(!}_)O&VVKXWC9Ll!e-~69P@0;h7bK zP4Rl22I2Ul(=2!1y0L)NaJ|+mcq;AIG#h|2CAN?BfL7DG*imqHg)(ue9l;~|%tkQ{ zW~lcHQ2Cf{3vxeTX*9+c_M=X%PS0%3!}E}MBf0_TeMjZ|Dai!EhfqF_)-fi_r@cdd zhj>#BmvR=f`TH2$ul|IS=>R_o%~+I$v|3voa;-yKjYN&slSpf=kAtc@^Yzx%t_y9h zk}vA!j{}0>lZ=KrfLyQZR~&rXD!ry*80|8qRoA2|GVBX>Pk(LW0u2tcuxag-PbNvI znypW8^QgA?lu9IDW{ZR>k4!;ci3Y|UeE0l0JNer8(AkLJB7zf(o8me?9aYwOEttr^ zC{R3E#vK;&#TOx*2SqeCMoS<=adpDxKUc+VHotUCimo<^Z*QcS!f%nXlmT1_#(w7J4^EEXxg&R;VG4$V{c*Hp1jK7gx>s}JyBpyJy%50}q|>-BZLgTkXENZLog z*Am(@fx-GoS;1F!aL_oU8d7eTib)OZ#Jt7_VZJj*9h33%Y?NKr5tQnxb_VAZ{ta?D zXtfCy064{{@1LKUdLWKq|tiB+@J=f26ObS(vBxaJEdi8U>dTgF^WBK z$3YAha$cynRpNF!(OK(+mf9exN)E5DTLz)vlxV)0?3lA92kc6h+>l4-rKmmfTwFUf z>BH19@-AM|s}MaUz>2pDp~=e(ZQ&{LqUkG~L%x~iDtA>og;PtBp(HsQbyY*DN%ZqE z8gZDKjTK|N?CgouBqHTI`SuqEl{=LK>G#k#4jtX>QEefqTtBJakcD&TleeaG{L+?Q zAfg$P3L+n_CQX&y*n8;ncXoc5!k8R#uWB%pO>?`Lq^!51icsA+09pQ*RfdU0h%cst zy9g8gtxa3N?mq%=x7nwH4NBY|Xr8w8xa)Jy0LaIpYn5Hn*s<*PQP`*WdiwN3nm=e% zGRjY&_NT$&{GV@6TZX$IQ~4jBf+m-vlP}Ar=o=Sve~dn+|M}MU!4UZUzc(MTTr|=; zGO}`iW~8yJfJqdf1LqQZS$MDJ)G&f(U5>T%rZ&p5(~|lUg6)lF3xo-Ndm6vlOz}(^)5j&9lcn*4R}gEz-uICD$L& zqJ>K)v$M0!-(*Z@#Wjgq&1w;C+4GR_dj?JJ+@!ORV%->Xfnq{UK^~>WA18J?m2&_% z>`GNRoMyWc$52Xztifa|`%(xBbDNg5$P{Uq@YCvMMo`IWnZ8W+vN$A#Hn!EU0=_|p zLFYjP!|GLf=N4Vd554xAkec{_f0L8>$7Me~yZ;O{p8p;YSX$!x`J8<}y{(&dGc%C? 
z2&Y;&?E0ey8&s;ICZjvv>_q_z_h@CXo7FyXSwpU(%&|Vb)*7f{rG-keA9(MPk+>HV zEuv(}ogxiu^8uH*ftWfwYtU=fi;<+7L=jqK2v`}Sh-n2BOw-wk9g8Gybp%^&NJ{B# zLhT7yPw^9F^;QZ-&Xhb7mGxONv zC6X15z1Sy)ibnY$Te0XJtK)G{yS-^IEulEt}?JVd{^ z0+d$-@Y)Wp1PyKtO6_HK=bCFcCo|80-&^n3%tOL0w_pE0T|IdT{!3N3dyOS=bnVWk z3ms7XPFWUJbX;5nZ(C&DY&Wx#)a^FdY!%F<+G54;rY$4eC_}Vsk6%O0qMw{)(So`5 zx4FzF`6%^3XteIZ+bNlYEZ^vR5`tYkj?~{SzhpCdLg@fnVO7v46 z4l8X}RG5r)nrJLF8@89amua^mnN?x_VO-S_+Z7c=2RxU zJ!g&HLR`g(pIC&HQY~N1IhkZ3v*1Om5$0C(8mnNuW)p?&UpADCT{dz?+aPl)nsq!2 zo6C@>*rPh9uG72#pV|58oL3Te!dYogzxKMT(575rn7|%IS9)&6I9E)DR>ZR^jF2`5 zCs8po2S1Q@fXvJ4tjivnRxc^llR#xY7hIZ}W#z3})IAF>%FyBGf8Wo`iH9M~dXo|i z7%3Zd_Y=IC4x0YB*&Aq$iAeJc*lKmY&U}*P>eYM&JnX%q2t5BQDUbn~+0bWVgJB)r zvIlVE0 zB0uonOGRU`FW!U8ehe;SR9JkV?Q&&UhN|THY#H5izvaUkO8vDzDr|hO+Wc$TDs|uz zd*e#~k2YUQ@=$69rTvp-Qh5JE}>?v`F9xC~9G(F#-+$d>v|K;C; zc>MyJeaBefQ!ZV-MQ|f^ZTP1z|EnWH##dXlUn!yfh<|}Vr{@Pk%h`>sbbxa()I%|e z5j?-?el+xBQox<GW!aw!QykH#eU`wYPEbrrC!u0ppbS zv&6uD&^F638@NqNm{z+?+rciItPIo>74xJ`(RxEKK1fi%9_qeoka|-(eM9gjz*fGN zo37ORIG(Qf%RmmdJtPKApdqBX|H1Q$MQrdy@)r;g&8aqcr9FhY zK7R1iVmo5$=3rgESElKAk={Ko3jZyYYI==+O41F?(gOO=H=%|DHoMI?B8tVsaH>KJ z=;zrF!?B&To{)$L=HO7HzVA1-`wnRC2-SD@ zN1BaW1^qHx)uydQ_c4Op`Qkz#e--{ny9?UZN(*>vpY8)Y5*Uuon&;^lNBOi;YJUl# zXbC2pQn^ThzNH@MhM^LTH}tE0j)5i?-&Hlc)paQ3bCi)+#x=G09k|>YlhB%t_&UMz zoIYIjKlX;FP#=L8CPG6?o&|EFvpqq0e#w;CXC|s!gF~^7#q1)!s6QqBNY+cdy3c1d zHQkh3-XJ|M2lm5Ab`5ua#Dgd&dQX&g>KHkDk#96n=a#+Fa{IGa5a_2S|8jtr{BfRt z267#RwH$tYQ}};ad#l(uqNZIpjvX-DF*7qW+c7gU$IQ%}IOZ`k^BBfFX2vlyGcz;8 z&iCmY>FC$dzWSwZsj640d(};^R`q(HdS*8k>}_*|)^0CxNXz})7ILa-k`SRwmZ1iw zV0x+&VQlV*9$*a^PoVN2l@Ty;jqAyq5z%QhC0z#811ufbl z15Es=U&59GV%nmlF%{#U2nKULbe2Ja_Y4iLkdw)(S*Gi8=2}+DHx&9#VGWfcqdgVM zp;m3G-kEUf_Hq1tdd96_(*O-Bxz66}syC=1oQ=(^g)5V~fG+(aE8o8Bh?t!?4<+-a zLBge5&NdMFT14%p&0TymmMnOjEnl;d#+G@Xk?-pgII+s4`ASpkjd7Mm^^rwrn04N! 
zHeuJ{$6+1hmN?+-qKVJp=^<_&`(YUz$uLpHD##a;g11m%|Kb7DXMQv_84tuurq_SP zT$ebd zYrr!US2Qe9NU*QBw3j-?VI;kn*8TAijVuJ2-F4&vXa{6 zgo8)C!I6NRv1-x+{~7l5(IjAw@VVck;U#KR>nv2m__c*jXa!yb7s(reOy@J3ZR888 z^L1@Q2a?g;$UI`rijW!m`si{jn7<~)?wPR~d8Z`FP{Ovt+%2Mq95?dBtW4}~hA#3| zpkbF*1)VQW1 zizU56_f-^?F*iHoIonU_(~oam(?FU%0JAI>*`qtXqxOsY}3g{yr)|3P*>)GIX8Y<9p2I_jf)9P z=bPf*XDDdvTO*L(*yI3c9=G98SJ?Q`m$oOq-(2?XRg7mzM*ld11lXs=vE|zD4z_ly zip`7Rc5oeTf7w*@@O+?;=_2o|?N)GXt=BGDTm=2`HY{*>kSp{n`v_D0Au>5g^`#O) zoqvZq@Pfz0s~~SEuRhVeLy+Pe?CWuKsFTxDB zUeyQej)V!^rPj?W5#iLFZCF zb;3U6<#edHuoIGB&qsod=nSN7g;y8)Eo*IPcx(`Vqxy;(6EMhHoX1?z;2$|^xFg;1 zlrfnkq;ZMk`upuwI=>7olfa;vNlPvQbkTNz7-{X7 z=cY24>}`POEPCHRK-&|G?pe*MZL`{QxVo=5GXWCn=A}E@1`nzo%Gq_vTKbM7$4chE z{6H^ObKFk$b6hM@np^=$;!>U4;eLVrCe;`QqT?_IvlJZZU#^xA%8F+3=?uH%``&|q zheCg;)@y_pgB6z)zkBQ%hFfs9ZuzzSQr7Fc!CExJ|I>yR^(_wl5p$k#=~bZIAFqgr z?F3ib=(vDJu~9q2JiI4bgoe!z?%=+05hW8C!%G+P?=wP};8B;eFTFMk;NOcWJ6B_~ z5u#1Ks@Hy-lX`c>vHFbq!Gnv@Tzu5U5Dp43x%g?7ZS?1Ub<)i8TiC*VZ+nI?qw7s; zTS*7YJa^<7aupm{ZAP@$Gix^Pa4HM~h};>iH>L8R0+_FPRBf>9wGj%jok5qAhRxZQ z=j_Vn?K60}C^5|bU}zvUiTBH(&^LFenYhGUJ(ou|oq=Au{mCg1`@d#>0Bu3l8!7U^ zbYzC|{**%~N=2w|6n5y1)0AMdxKp>b%(LJu^2Gb#+0hCfRDzkSI{d5(UpG}XY6f@4 zXlbh{dol9&diH?8dp}RpPO(=-V!?7#_ozUu6uVszZ_@mr6h3gk7hXi}IOTcp&0USS zJ9!pf30TEc;`K^xF@2NPX7L((a58!s!)Aryk;p)Da(g*BX8Un^ zE&L?G7Wo45n258Xhr20oIgGY>ri@W|y-PfIS-$aL{7ZcuI(Dt}5kJf5t`+Hl6RZl3 zymE#3M3h0!5qq^1&Gl`33jNd2-PbvXrQ=7fY3`83l0ogY>?^FK9W4C9U;|3vjqSSm zoICacO$yF>H|s-+vjx*sq1uCUBk%qbd+*MJ+1T=4p-uLPqwKP6u#AEILX|yRJ%s-l ztuKlYh&~-R7zeZP!=5UuRzDIjH-OGGqEc$=#p)*F9IxzScv;A(kJs9h%b+~4iX||7 z`vNK^&E*V9sU%(cp}0*c>&n~-7hNtX)p7ZPYY{HEM50z10mA-M7%h>2&jo+SWE7zE5x0yt;_Smpz2PR z%WKf9B(v0eBG|}B=d19yvMAt78Y{H;9;kS!Ich!@E#J!7DBCeq;)il-;A z=Xu0Y9;1`fsPY2}TK3{fXoWMy)rMwmq;So2e(Yug=}|gm;yUemBUs{D(hO8cz{OG_ zw(V7_8fo+_Mbj_V zxSEa-QlS_Q*ehJ%%u`|RkA>8nC~ca?;n?eHxzhW%-(LKpOsWvrkpxybJtIB*2#l=K zKufu)zFLU22mV5#ql0hjui;3n3f&;FF%{vYCWTTdRMfK5epcxQaux+$>=e@B#OF4M zOqu@X6*;*lf`tT6-IHGBY&Fa`tFN#lI5B`26R@~kc4DDcJ$*8Tl}S@uR~dsE&6oY} 
z6>5~n^%9Np#fm$)!e|9D3O&Ybh6y$Fe|Ad6LFgV4>Oau9juNW1)t>6WkmROcL-D3| zB#4AM5fwVf=v$5k6t-}e5w>LHiEu8AM2MlNKmpY{uB1g4C}K52_V30n+_ zhGxA;ij~q>8kX?SnX;c89M%s^$Yz}99%%->Ftc+zHNAXPxXX8)FALbJCh;j?EHi1A zh*VEZI=a8=8>Z1IC?pJR}Noa+Igifco%22JI@9(wvrw z%^f{}NpgRs43;rE9ZCTpT$j>n6fR3-5;Tic;1BqeEw@66lc%sfl)t;{%~dvATxv|O zGi-e9tRBg7lUJ=#7V%fcds!I+uh?`RJxZ@ruQQO^MkZO}LZfit+S6||zOZj`5xqf; z!6%Eo+eG#@^M`|6;?N!ouYCIQuazFGVy!*iJL{-$$ch>%Af@g^m7@6_6-a%I0z9cP zhYD0k$U{@vF2}`CXG|H261zcLNl2jYPo-AJGUjr6dxq37^`hWGZ!JYpHq)+mFR*@} zpqo!(3#Jzaejlt=m$3uO)wqY}-bV>1;tlU$$!65o)U-gZ8&R2HgEfffiFH9b@$o1h zg^crEug$DO_*gbl%B^vrlq<2Kcq;6$T(pKhc1G{MBJy}f7wf(|+Nhy{FUf$%lq|6$ z4&(B7Z5B5Gy@C33fc&@(SLoQ)SagH<#dl4u?FNP2e)7(7iK|mk-*M82XFM0!ReM^9 znZ!(XZK8(bWP<%6N1>s8utKQc_)R{ZIHQu-NhtY+ew zvsxWb$wqCD3qQ$y=M7T) zCK%(sEp_*<^smw@Lqka`@t8@fu^M?1ijSOtNW`z2YI7PcNu*yvW=wN^w(|HQucCVw z1v#|=dtPZYawUh9xW4`Ba@5fTxFM)I_<{hrp`um#K`;NEc zVetGv?L92^|C1Q|3XXAvaE|v zX(%9T19(OlG+dK@&N`#KYkmwLYo6xXx(#b6ICVgsIrI(nnfAzZCP#rv@qy@hV*(1;@tIhm%YJiL%x(ra#zv)SbN z2t@uhQ)MmxD&TX}yqC5^t>YYSf!|kvh}Wq?PG}L(BfHV{5LCE0yLes2ZeKm?_pf24 z>4=S{a;)I!_S3ur=|fWHt-5f6ukz&)9=Sm$Q>KK!T;^OO*l%tg$#Nth@|W#t&L!GC zS;PaGcxMlW?W*20rD_0BkS6zPpr0Ihvs44(0)L5BClmZ~b&2;#@$!9zHn3wjVHPIm zk9t`h>jawO3E2oZ=N8xzyg_;sK3h0e=@O=?BP4MRBa062-@6*#9niN?4awQwsD3`l zL`_VM0du@n&rt=0^bb6pX!$4lLEC*GEom3}g>-G~=$3Q#Cz1OR+>1v3B(KC9iWA@H zJ+7)D(J8Hpflw)Kzjz*BRoS2I5{!0y+K^q7W6;$B3IVhN^uNgjNXfhk^!fGlsgJKx z+;~zXEd|KLJAHyby$b3JWq49t`?Jg)lq1R(PNHFaztR!49i*g$AehPgC&6?3$R5UG zbHvm0B)&M{PyB#$j3`$h$dI7V;`Bs|FZiq!5M&?Ej^{65dLF_m7~jBYapdvqq6atO zA5x0~WlcIzSSb%{CcK9DQ-$sa1uRiz{s zY`NNM)Xu!M8{I6+kXA&BCHx=^8b&Ib)$m8&FkgDE9(Yd-&cx5c+wLilT3-syZ1r`%xIX%Os+r$9!c+Umr_ z=^21nR;3k}jY_LgjGPDE^0&ky=C)Qx-#6c81cO>e5E8-B7>9ndJaL66B4_b+d!4*~ z_R2PMK<)&eUjJ**Sj!#JK z8_n#TWfW>|bJ&=4_JlejRfO}!UD{1I)!<=uj=x(=bIR(ds%lY`D|%i5=T;U;TaS4Y z+=#uBdabmM~u6qxmG_b6;Vg>kC*A(#S5mo$xIwoc@HpgK_ucG{ksBS{i*k^}lR zhB~8>YbX+X2d)<9fmQ0yb`Tr6Jdgw1#zpnuh>T%qBg;~H8|Yc!bf;z_rn-#z4TY(t 
zjKh6o`hrBLVP51D>SQGwqtD?{u21cryi9+=mJq9lFY&|Cf{|}xr#Bk%h4?6+XN(25 z|7i9{IwzRuWR!?lq4gExB&3dlC4^z4n|R#}&%oc>Oa#cgj>0yAH4+d_#RN|JLRYS> z=1~a|5|)|Xjxv10j6dxR3Mn!lb7&TSTUEAYrO50zUznjYGt-yLMv?PE9@21lcJW%K z|1kgfPO9#8y?9yFcaW`N%9G8nAm?b~tz}sH1m%M@f%%HIC25`6NXw>zcQa^|UU{ZF z?~N~#M?&^Sp?%Y0oVpQmLJij#{kz?E+5p%#k@t-4BlB)c`@{H0fCO|UOMEkUkDuOV ze{z89zQD8><^_6tI@>v%h!_IiJ2^D@Gc7J;T7$Md+~iPOf6~MM!1=Yq!8 z5*!*3qFm$$@9&Q;P4~J?1H&l%X0u%}Ky6EuyXi)cxx;UKzAvbiVwU>Dqq`!SgB|@5 zsmE-g?YT*bD=?Cg$UY9&^-LyS4`aES_(gd%0fd_%XrP`{!)&3h<_q>>peUopg zws(T&%)Y<|A|c^Ywdz8@Psagh00t(9Hj(!1XFAf;qN-!E35NGXf)Edwb-S-ed260e zh^Fi2T&XnOJ{UY1mDN#CZ;wiz9Y3G3cRV}x^hwi^>0k~X7ERJElEsHB0>*SkQs#gu z27SmZ-QL>q-cHlY;Hnx!mNgjNhf87|%ohVIt(H*Aa6PQfM*_^+{os$T{9VHzKQAPy z*Aqx|J0#jkA2_2nKyCpN&b}>7FBeuwWxE$Og^DPZT6IS11^h`u&zRW;HU7xTy|9@* z1w8A@Boj|6LLKIM7JkU-+!G*~)}^{Nr|-Hc9hVD}LS&5=HXrFxs9MCgOj^aJI8?a| zaUrm$dVwC_Wls9#pBmI<0i6JfWeRwyKFyrLa%Bb?|Ka`jjdzb_{>9CY4KB}!mS4#< z5z7jBx3Vke9jTM#=?7a&BULCsPOQes6zdA_(CUOM0^iihL6*U1>DjD^76%X5Rxi)5 zK2Av=5U18}SYdccM50`+4!LWP$}5{#i6^`6llCy!sSVCQFYZgVea5u4V*(`(r)K_| zGYEV!&OTg>0JLhNEofh($z@x_}ncBF@R-(;8!%u2cyuO1G zzXyp_Vj$?*U{|B{rLW}ZmKfC&B8Z-~^(}lZ8*SV;5~u3YUVE0+8|vH-d}6DicCc*NTZ z*KC$HYLm=xK2~swap9M^lZTOEHw}|F_K~3#Dwxb=s}8IMuRZ=?TV#83iI9}6?h#X^+cY@dHYqKV##maz8WA*`#Oay{r zACdRM*zNqGprPRvSf3>v!yJ*p-S40}&5JEWjwf+$w*9E>P4mKDT%IVUd{U#GIJ?1h zeMU;k(r^BlLfJ9ai~Y-Gw2X;1OH!36@{Jpib&8N<0sqP^KGAd`^CtYvsyxJ-yiuh;O@UUHFzS9%Dcb(~ayb>{nx!j)j= zHqXEMueftAjkVdz>YsL7J%0J6x18uYQU2*(XXI&TI&*1{H2sCt@wHz28rG^;LVMUd z{mcmx6<(5y-^%sBhlSO~Za$nt&h~0kG0tmVLaM2h0>aBEbBP8FM}NHpAKFBigq?2A zw6RzpdS||j#B4@7ZVaev+cp6XFPbVJ&i0xGR(IPSdc5QGyA7O*J+qRs@-fc43Efd5 z7*k`m2+PWNX? 
z&WRcxSy)+rr#Ey^lMT202M{s}{CT^PV6lP-r?>_?*sA3- zrUZ|p?YdpPtS6B1AM-B_DZ)>iF$(oboOfdCJEwYFW|lztA_vaR1hxP$R?WqjM&w$0 z*wl1-sc#l;@YKcxPd*QOKejOsB@T2yo#`FA;p2Nu9CPO~TUIK=}=P&0EyY zXZw5FNs^178u!=GY*j~Hg1jwI=331PTuVlA+I>7llu!=@2coMFnIedcmevzyH>lEF zW70Lw+ocgTI@+tvsZK(TgeB-bm=uO|?%AG2IR-X)#On#_DcYb^RnHXegGsiQ zIv-I9f3I`Wsu6_N=b<%78(2I6nm6%3rga?y!!e$z@ew!iSE7~+X4966gW{EW3m@ra zPLN|U(^7`89vMaM4bJh2EyvD<6}-Lyc%h0x(`-uH&HKJSLXZWY&JpJ*g*VDnqXO`0fQvxon2>M%Vg|-#=)5;zP|~?wPnrB zQc_*wNR{jut~50_e)@Iu%o-H`z{Wf|Oci#QuU6y8k?ciUEvyK=ubxB=92M?wJ%X`g zFEvW*V|;vI-3b)whQ_j-fTpG|KcmV#G6S`jRGj@a#;yp0Yj-W2<{vqIC}HSe7UUa< zYLM0w(Jw0n617K^xM_#}3inRd-|D5uzoT0Te{nY>+vcFn|0ikJ?R2GqS0J_K*YHf7 zo|6sbFn>{iO$i2!Hs?&Bp-2}g&r3$w#%sT5yBEU8RW~VKBB{n%GVLJQ-o0qgGTI8C zqRSD;MeI(9x3g)_LfIlkxRk4`{IT*^$c6o46;yiA1mUB|j7axL5^HbGG~3db(-vgR z_$ByKU4F$p+Fp{~;}x-*!l}2`4-EQ2pDL?cxv8YyHx6&8fBzfTmt{~{KuAM8{=bLJ2r+$QPIg`PiX`a7E-OxBIKe~K52XlzN+-v zpiVF3?2~-J0gW+P2qQOr+~))8O8EBjHoV5z1E+l=i+>vj?DB^ln49yI1t7?^BCUJ$ zwVH+^PF&~1?$=6hP=~nEPSGs-b6Cj=K7B4hx&&doJnE8fY0sXy5`I~derY-gOMKxZ zyA$v;qYpw^sqU;fdz3_l+MKe`rozmwuxP7G+}-CjsDgBS(#&&IoD%M+p>*KOlj^<9 zt?`mSKKII)VR~k9__Y7H;SHZxm?J`&DUR>5K_%MCO<47}(6!XZJNL@&(&or}iI~kieK@Z+L->AT_@EFNzd-pTs773WsdGm&&iwgjFGO*p4dIg{W1sxe5;H7rlf>)LPMdjQ85@btm`YsoD5R6E(GD}Bo||< zxH%E{v8=MX`d$_)m;v%m+S4ulwiurL21=?UKuO+^w6SrYTtq$-Tb(3<4EX+st!Zcx zD(-=W(?)zylc?Qz_KOC2Qh*a_vzY4xHlQUO0Sy7fw`5nd_dTlD2(um;CnP^1YLmD@ z_M6u%Vxgko-;e80JfeqNB^eYF^H_c#=E(5wwbv%2zNwVch@b0gt8;$dAe`>g2;*e} zt&CDlwoYH>39t(K2{gmA=GP#xo?%IZ;=rqpmwxiHmXQ8dwc;WLI4$)Y>~+Dxix)e+ zUO)J#eAxAAD$|%+5aX^QNvq%252_giAK}7|Uzr-aVfec{+f7=X@EBMq9G$76F5Sd7 zmZuoF65=$8x~uLP4`I)kwSJMa+A))K_+pc463xaR0E=K6i-?dX@(n5p0qXuI0-3?B z!)m6D?)*wLe|4rELMI;#8g;w9@=fGZj1I@x)ZG3IF*?MZ5|2K4{|)YKe!#K>ewUwN zSO^%&o6|>%zo&8IXZG^WQn-N^u$ATb)0F%v4gOJ~lbGKk86k;|OR~mxdHY{l;5IfQ zS0IAj5e@p$zENr@_>gcH{<|J&m|ZD%?>=m9*7O(j(`4~>?0rk_WVx&4!?GbXn8PU# zR3s&?3(;BeE&eUHHN#W15SqnMu?IK*PeQt^j{}|0*CyIS5TW<~!4kFZKD!nB2fLO3 zcm8j(L>-O1{%}IiP^#g`E3{46fCN>Zyid0oiAhFaAyPUMVW~^3gP^XnQg7=<` 
z>yUc*T?so>;QTRU?%WhyIbyiq8O+AKD!Uo;8MJc$+$JHqaaw(n@frP{9B*bWHQ9=C zgwo`@=Zamu<0zuh7$)@m^)k)H`5XveYjX)}XiTQt3 z3FR6sG@NqTXOh;5Liex4$Mu)%T;MKB_4IEI zEipe;1#wLDzONDZwX2_XO0DmAQW2S4yWk^w9S;$wdxr*`;~*99y2alzpi?sr?A&OH z=6{1G3ay5qskOo?(lB}{X5tKplhBtdrH0VLs;*tkrNis7Mv!Rk|4fmeN&R}qDmam5 zaKuBT?#t|9aQ@LiVdYEl2h2D<;~6gBMeBq}#}R?$ZE6%{Eu)M(@NGj?#(aL?7UWot zTCUPvjqpRd{`ZxA6oOP}1mBKi+Q{}OSm_!LHrpOwU;_VFYjL7BcvoWoK4{jmbv9UU zMHN1+^rxn;Qrtu@f#QsyWnAsr%+u#05=PBHqAcNks?Vj-QcZVcT74xbrz zBFT8~C1@8;tWT2s2wq6x>7kFSugDKSS;Er-QK(8Xcc!h zUL{#H{k`yS`pvtxS<}WDkia>dB`MBU6O3Is5#xgV=eC*tD~YHB0%IwC-yIf&MkF@Z z2qLT-E4}Data5zrBQ7btT(K5vAR|6i_l$elTKGPP+lsl3OY^ZzvW9YY{YEG%^T7|s zK-(wws73C*5RZhS_0eb;F=YNRFe<2ghq)_ zN1jGIL<<1dQBTOwV)Nx?VeY;yX>qHs|H{aUDUO2LYydiyFs)AhtB?2bP_24~so#fd#yjPvmEhW@~5;XBF$ zMJLGX>6MaoDnj&4eqadHe&?oH`K+&GfK}vk>1iDesR=FVM;+x-;Nj$pOI}md?FQV|O3lrS9Ip86T zoE0krGe@mQui&LMm0+x6^Nd9@98O=x^Eo(1c1 zLw$yiCtRnvpT!rKjy&PgbP85S7+=51>m&!}Nma~)xcR0nmZq%`{>tqjcagA@1^}xFEUZIf8eTR<3Z=TwrEsG= z)v}Q^5=}D1Xw)IRtt1y^&i&pkBMW1J3HEOaZ55_udyUt;A;U%aid!Y?9_P-hRn_cZ ztkT;N>Bo2>K~<}A;c;5KGWk_!3U&i9Htcu*S?{^BN7lG>3xyo`SS0M$b;_oRE?Q{o zSxZ28L;{GV{ImHJ=3qJHT|Hf7&B~847I;6D#6&XG6-hpeYAncIn1u<{YpaKXYHg!2g| zYf#ujQxgw`7(sXvUL`C7qrNpz$}tE6_qpzq)R~~A$Lp=vzN!FFq+m+|~%hql1J(K^3(TrjSri>8vo_zC}&4>5L#}Kw*9s_Mm7r2FN^PS8N=X8-@~{od$huq)S0e_;Q8YSR}Q!OR4;8U&AVcJ@gB-*m!=s8e1FW-C#IGqxRk)$JrF`+b+&?YaJeegsW zMYCg-e))N0PkY!ax@souVk8yL>uqeKkAyVP`**8%@H5dEc3|ok+JO3MqBmZS^C(Ksb=b@vFr1P$g=9PnDpR5tcA-nZDH3>SkOnE zeC(WQ2Udnc0!ljgzT9^`?9Z}N@d%nj+q&+qUv$na)|~f8q~!y@0}@U4+njjRb;=XK zF8}(bjZLoL3azbGZf9lUsvkRQy1y+zkL&}{`m0Q$g=bk+KUelP7S`_e!qXM3^b%WJ z28GBqi|qLBiS%&#pe?0M+hU1l?8d{n-VmtJ+l0kMNbo^DnKn|cI6_4|?nqm?h?Z4= zZdhHs$U@ve-PBt{tt4W*DG)SxTVA4uk(Q024kDeY&DZ(Q&}>Nrz3dkE|pJRmjBOM3tYv z#m6nwVTs>`IqT2v+NjJ<44$m04L500@Kufct4rtyq&wIs%7)}HVg{h`%zuY-keF-*PGHH%%QLLwH&0en6#AjnY0xcga zES{#)N%=427Cg`RUM(Z~7jm}moFJxe^s+W4>q(Ov{>hC4m`oj0^5fZC?dszB>-yTG z7Rffvk2iNJNL7~>HUMpJhcQiU5Id_rt*p9Pk%d_rX!N$6UNb2zJIPBu?MPcYkCsh< 
z4j#lhQwvs6$Gj+0`OnbYfBk>^rL{q<;4*a}h!&uuUIYS{sPFy9Owv@>o&~|FCy7_k zmVoH3{{;-^Yp9okw%_Ld8(IJY)3?*g9Mnq~3#VO2Md+cEqnY(c)Kh1hZOV7h=m}_> zmg#7fZPE-$k!{i8ZAeuLkPQ^?uI}lwG6Fc1@buibF4jFwoRy0`#L}=JC;jNEm(hcF zQB+TVe$6zYUPuE2YFQlM(KfuYi4KoJvNKTW>4|mwK)PDFXf>MGfDi-FfGYnYMY+f;dU>6ywCk6Lxqn{uO1OqFeD)#Wkki)8*|g8pkt%$aI? z>!UMbsKZ}-H<#Sc#0Qs4^V7iybo)GW0r=aC1hj1_G2*)eGXoT(H)yW!#huE4=r<8<2hG)=MImVC3fU&_iajn7zFC-nha z!4j{a`fRfU{UH#->ypM$muee@e(4bWKRuo%^%*tg_MNb^u89HO$6RcUp_zpT>O{^d zt;9PJ<+TUR*U(>h3D8DnNMco1itK+gL`6a5d$n{S^igef{+H=vL5M@0==FVPBaP@a z@94nL_jRfGxG~4?KK!P2!vAAMF@3mtqO(r$>2xM0;^KW5z$BDH@za;}E#Sl7Z_9Ab zO0q8C!%VCHLhwaNC=E0ve^NKPU9Zxv_MUVE-1D|FV5*puh4;1s8t4F1G){mVI%)Jc zrcuNr(AqeI`9J5O!RWE-zv({}KG^I{8v^TPG?PMT+trp!T$QY{r@4S;9ffaGLPYca zxAdL+TCfAM+{!&eKNON2TY@Ah*x9V?r+aoCEUl)83;zkd6B;P%DbM7VGhCUEEU$Bp z%%|6Ob%Uqv~*vSf_s#HcIJE!A;8-#z`T^-IHXAria|ZTVYI76biRJN>V*%EXTaV zYsxg~^wj^TOmQp-%Ai=K-ExQZkZIEG3D`4$eUSB3>goM~nM%)|X3n~18Zso8&ranl!*O7pFzt9c**Bgwp5C4?9@`!^9^8f)_ilF?cWk%nBrR5OSEQCd zmT8qWfv`cIrHdupB^nI(B;zDSB>p5CB#b0qN!E!Qh@*4o!cfD$eSa6}_{r1;e=O4c ziP1LTA%^O8^`z}jV+ zE1fk&oX|~4#MWn8GQ^y)MM)-So;J#vX2QB)8a@Ff;?r0d_$@&$CA18+-~}i z5O~crbVwmVpR!f{KJ}EN+0=PyH)2VN_%X47azHLRBa@lO%6WLVF4x9+em8T-!O}+A zW@$}}n>a7*W)EF7Ib3(`<_(c0m}8DH=xEi|S(;5P=hN8H@9WZ6YgyI|8rMLm z0aKVJ^!+-o0z35mx^mU2mNk9Gr4Vw!5T+`9kq+Xwz=NkSE6f=NbFI<3lEzdsmKEb@ zh#tTT^CK!7vz^{s_qOKHW@;k86Y>f02lIu#!?Y6u1u(&sWI)ozuhFuoSvUR*ktoai zdFjKcZQHPDTr+Ci41ohAVV2Qr=$6+!LfbGj=-AdS+Sl|N7eg2Ue=$Sp-F08teCY3G zo>mYVaE#ez%~Qu5Gf+7R&IS+zH9mKL6eh@9WH55-+qEpYHXa$x%$OIBQR8VlL^a@; zu+5uij+thhU~#TyC7zA^j2DU390)Q7xWO_m!ez2vv%j$MeQ&7J&Kozr?VO%S- zp4-A{eLD~$4v<9*{@YpgcK9X4TCbtW*lBJ%{gcB?PZ3a_E@Ph4&UV?VuD&#hi}TEm zckvc{WH&QVR3$IeTr#y`{(J0a25-g{Cz>tgvK)BHzINUoh5EJfz5ogL#CCqgwtC5u z4Pp)&W6wC^WV1Dw;jx)r83m`=u??74j`3#@aAw$YuGrQb71qp|XN>t~eBp$*!(5_o z&^E4}HD5B36t&6AtL<0F*yE(L)mSbEYunZKo2QH!Wsr%q_A6(waa!1oujp0tP`cT> z^z0Xo(Oi9wn&CXKbzeEHS+c2}Fz+0L%J|C}+DF6A%kAoFc|O^b2Nb0X)d_V+IwK%l zMEn-oKS(w!njy(KhTp=gSJ!Onva}yDrk-J=a)7&FXTP{w*KFppvL8LBpW(y#02IOZ 
z@-D6w#R{EAvcq5Ib#*yBpX|*m7G)~V+a#VSYA1N+zI8i1nA|Y>?duiF8!C+Smq5ZA z!3q1gx+iblD7n`*ln1F7KZGyR5gW*&;?EoDj=jS?Zf^?>nw;1@%gYsv;E=oWsK-A!mjB?FHOUdG1b*?zQwHhGrl& z;@k7BIyId*Dn>{aB+1&Oa59eR>onE5^6WUyZ)Ns+(J(~8_rgpMF-;#czOxoGWG zFB{4K?al-A>x8-?oe*$x+qy39RxcR=dNo4rw!h-85V&)nx-RZjuNcMj8ie|>!l%?T zt`&ARm3!cvv-X%3j!>seF`DT0w?w)Sap8mORXn1u7}4}7TWlS*&%r%rz!BP%IYvKy zZ_8VkqYKt9vs~G%5#p3?Mp3&V^*^NLyY zh+c{p^LzW)P9E+|EfnNP2E;%SLJOQXuX=cGUROhH zU9yee0ry20J@Yg=U$M&m-iX}hWlM>n^|%vXh1qfm9SV~ z+%xlz`@m^^CvZSq8r;trb4JL=@9uqiy?9nLU{W<881ts`8TT7Mk~jV}_o-9gPRRg8 z%m|?-f2H^6Lesv}+D^!Td`u`|41f9)`-9ci%(MeH_rKYL(XWqoXO@Zqikdm|f_A>k z&$G|2H`*mrYsI08ia7>??S9XXw|7SeGaDAaCrG3I2)YUW_5J#SaD{WsHfqr{@jWNb zU$omOsh>0lA}H&d{7iqNea<#uQ8~e%^CVctfjQ{wSM)M^nRcXl!8T&iFoBz+e64Cy zH-Vi~Be>(+{~~v(y;VDHku)Lp;nZhTQ|{_Z`673fcFZPkdf9y8vT)EaL7*(V1DfE- zIT7UYJ%2iV;MsJUJcykzno`;MCsL|`$=MO4@>O`wyw*Oc9k*zwX5ieZow7)nFwbES z)b(q4vVB>2sZjEykuVS(9KM8AGoX^1Wt(O$O@+T zRZ5M0Lx9I7tky6Q>l*y63{?Q#4-<)qjm0dOYxA=)H*d3Ap$X{_b&@St^XJ8HK`1}y z=P$R&*u;jqO-Lp1*yzmMzofb%f2Tp+LDhaXA1Mq<`5g=e_)Pf7f9th7L!ayQ6Yx8^ zdnc^dv!?g=Je1w%<*&0}>)$4+$#fRg@ zcO>7m?5g>_14Z>&;p?h3Gq)UC27D8yBh99BSJH1)sAA}Gm`p@&s?s@Z%xS(tMX8Ng zf08%RufYgNIJnb1rXdn3v6UoiA}yh}-;Z#n=?dMXGGZIH{^ctdwH)hCawf_fim}=(%;>cPpG}T2uGI%-Cj<8~&d2pO*cZ~;wV|z&?MWRFS;ql;* zV%YCP@xSw6J5%rK4L-%rk=TgLhm^xx71auwiVpV2%9CV>HU!xsE@6TNOeF`+qOwR@ zMchMAVVCg0OuMdwX|c5;Vz=Dej^YJ&ePn5VVmD!!s2+tqzz`ri`4fpI2O%6K>KPeh z_*W9mf6uOuJt5)Rjp`qZ)g@^ZwI*8@uZy(AnBvVB?fXBhy=72k!O|^AH}3B4+PG^& zBaOSeySvl4yBuiT-Q77j9NgXAT^i`ez3rZEj~Nzn=P2{7MO(rzQ3c4n#P1`I31_JDYvv>6dcr?Y!O5V+ z=fz|q0R$|RMpgrj!)RftymRDcqM+!~H~=OKpOM_adbp?30!x*P)^KCQCqCpa7LyqV zu%VihPl{!x)i1T7I4T-Mv=TT{?ph6SnkUB+8 zP6G^C`i;ehqa$#M*~rXHK&5~W$ItD#Ycc@vi^Y{C$OvjUItCYej;B~!Mtw9>t_5(( zB4Fe-bRT&_I7eMN_bAQ0dcpvlr;d^iA#Nym0(x8vrfaGB%5a^^n#3Hk*HVhJk*Lwluc zl(R^3kZQLSu@&hEeuO1NchM1rxyvpr4{$4y?ztEg3j) z7dQ$X3WNFq5=L&Y6X;qLjWQM~4y=cjLuG)uUEf>5Lmkiy0G$^^_WajZZXcpHofOOLKbSt+NHRLg0o3w#Zht_PD`PO0THQ~+)VlfwQ$ 
z52Zwq$4Ou(AW-78jF!PkVdgcI0S*Pr!v02Yrm&G+rn)jcNMq(S)B!#R!@{DYQ&Gyu z0TP*2#jRwd0gM)fF48C2PSiVgz^>pwu*iv_f&FTl5Y+r=)yk_SE=Ifv_4?0c#)|!5ZP=DU6eZpg7fH*kS*jviDce`>-P#`o zno@VkShU}iwTjMVm(yy6cD@6J%lu=0z=om6QV_^;r#kTO1V4vyn*v9Yb@2+6#7m+j zaeuSYn>vieC+E@jD}|PvFXAN3k{6jv)g*7y`YOE^-%Fq5u#(tvbMyBR#r&dmr43Rl zE|QT?PhjOUHIwok<5v8XV^^jsl#xkKX5}%J8kVe&UxZRO@78~OAkRxZ=*5t767dJbuNL>DCNW)mOUf z1IOHwMjxzHRSVQBY_3$&$J}HNRGa0?bL%%0Rq58yDLYGTus_X@f}iHSvfO%Kxli-P zJ@K2+;!!6_^Uz6|EUzq0-i8OLufJqC)~ecNKRc>=Wg+s8yQH2~PHWP_OJ*(b zPSuK=S24i3a-P^Mo)pb#m&D2GduGnR;JuqtR%DC^WKJ*iZN6 z7W83eRq!skRG+O+x0NOBrSi+1uFtepILjY(Pl9KK@y5BxoxZ`AxX7Jl&hS(?%N=#{ zTb}96w-q|KeG$Iam0Gr{mA$pA1sSyU!3Cv2RUoM0pR+QrnAENqMD-yBWkKB`9O5%D zYgp8d^cvOmp#>E|=_0n{x^NvEPS5403ELOseK(jJ&aDv^3i^NoM}Wec$3d(T2C!r9Hz*eF>SG96ff95^IK*4$@-T3i-A?Ya3}S;aN1Vik$mD7@aG2Rn?6V6J zfbvFs#69QYHi$pU?IR89hmt^u#UnsJN07tO!77dp1@Bv;Jt*(v2s(q}La@VIXKl50 z7~Zb#69{4@!sACPQD9eZRA^LbRBBXfRBTjDR;EwQv#2@F#VN$8#L34g!l^ng!-Ips zyjQwcyH~tdy;nw1MNmdizgI_4c!LMWbd6+*M2$>{jE$6woQ!0Qtd9IE%O&P6hMevm z+stF-+&DfXC=o_L==@2lz-Yj9#&E{G!??qQ$AHI-!id5YGK?@P zGfFpVGYT;(GD4f?fRQmI^t4dZk_Hp4fpJ18*dI|x4bV{po&&?2?KGZu?S z>(C;lz%}-g$7}yMeeWn!T}MGjMMp_TO-E5jRYzGzeO=-6BYU=scT;zpcVqY2hk*o9 zP7ITE6vMLwJtnfTO+1SS%K&5L*ax6wYy=P00U&d43Xo`Q6>G^;d;~}^Hj8~FXmA2e zvQQ8I0Zp(l4SzNgbiN;-5gHC40C%20z)7~|M)3&}GIxk2GMqz2QCMXFwQw)@FE|*fEAJ`3W33I-)QXNJIMt@GmMcN;oQ9{BKPnrlXFqi) z9P0)s(bA-Js2tk{Fwr`8r6MXQs#7)S9m58)(A*{0zd6PYbfIb6y3^j&($?;=KUQGSrzNJI$T)HI^$Q@`o#l17*=Q{2=JkfX&(@l)Nj z4wywyQ)DN5NEspXf4hkuXh55iBzRmItUno??3^O zVY$%Ej4^lkg707}V69=cD0RNK#$O`>BN{I-gbAImWr32xNw6g-a+I0k+*uA#pYc&M zxQ+XFZQ}JeAWyJ8`XpthgogAp(axXXI77l6GT>maG)x*ABl%nMHOY>7XP<>OZQ#W< ziXr-rA&@*+0yYkni25zu0e!~|`1s6XNWOywtO{O(X+ggvS(j{$a^Nx~x+GnfZjEyg z+My1HhWQOEkCIKnFGf5>28b(xokGc`d`rEy2HL~apz2Y-Mb?He#~#5rB!OjDx?W+OZmAQ$Qxpf@c}NRpixt+xPujpJuS@c%!YCjma+lr)!jp+z zCWDneHQY37NoJLq)_Iq-Vqe@dl)!U-EcZR-ggqM7lnS?aoSB$Krp3?;fo*|#4k3w6 zrpE9#v<^}EIRMw5EjEE$T!}p~KcQ887cat`B0J$(e7A*CY^s@Xm>$Xvg(vUm$ad}N 
zeR;|=&-)^+Tv3H?&~UI~aaXGi>$#{^&2q?&{|_NOI*#_QIwCwt3yOpU(F8+5k4FFB zw76M`GyfbSV&rUGj0({4v+8GP|JM*XQ-+iE&!K##3@__*Xm;%^S9a^{TsnL5cq!## zc!030vka@=<8JP`Ed6N3RqKz`cb59H*REUv@k6+1cX&onUAfqde?B@a<3@q4p%g!> zeWvUmgC$Ouc#C1QQ-LWCTY23)Gckft=LCC9R&m`NyIaPP+}yGy9-pjniRJk{E}!p^ zTu$Ab`G1+>^Suwr_0-MTpWpvKQv@77@S(|oIyQrIgvJXQcRIaz_q(pVs})y1#{Ih& zqr7o9-IikyJ^L<;u)%4@M~^ldF4yBGW3jRxt-J9&b=tId;Tqg$J&jvuL!Q{(RxLN^ z{9J;i{^f}G@{Vn8>0&#RRRxn(S(8;^6JDbVvi$?lADBarB43AKgHaLX*vnJKa}4FF-m5VBnqELFo?6ZEv$b< zEY)CXM+~~9X%+U%iX_EyN>a{~wIMUmK{hwpKVa|75P0MYsvHNhi5LEktOQmrXQ2;$2l^ptCR4 z>LziSCFbLI%TwTw?FyLH&6%80Qg;kew+`BU6KwhRXnjL!>-eJSgQ89FH7ZUmN_u)1 zd~#Pq!{=E;Clss2HY&)-@pnTjMwKvB>|`+Xuf1x0t{s^tzr21tGjN^ZMI7+qrHt|3 zCYuT4?eE&~I9RLr{-N>_# zyezpE;*$l@2=BpaW#qo24|vliu}4RbP?Yishd@w{AJTMR>7WabVe zYoz8Lz^-x$N0`=;35QV~N5M)F5+>x5qr14&doOBfS6st0{T;>>^kM$%J2HpGqll6n zF3LccPlcB+j}R_AgT^*c3!oJrD8-rki%E9Ki~j1uwZ)m&@uH7PjJ_6eG04xN9~}j0 zl&3*~6-D^mvK|*jI7mGpx0_^SUKjP8{M@KP=Z4u4u`$S67q+Z?PMI3j)$bzQ&juAH zF&rj7e1z1}oWob}x0d{AdYVz)Oj>3gfo>yKTv zuVEs%VS*+)*CD@6u)7?^zPpRFxuZkd!@t|8`ssgehxl$jHu8CqJF2_aP8uyQXvWV2 zYQJn?m2PvwC3J4pvNDVsxyN{LU&{GAp{Z>1x3CX0|A$7t@=swA$I=F)L;yqk0LBgK zjUtjy9DuIa@!4BUZ#jP(BdSyL4w9Otbtz!}3WE6u)3!Bu9KkrU(`#Tj|ErD*3yO z`*~-MZW9aYyb3hv2VT-s)Kr*M?o`c5N?S4cKpgM&%1zL3#cOb9A-`&Ep2C8J`y=is zy_qvLbqAu}{rzg{nZI)}&F`6db{5**-@3=9CGFQx>)ON?HMv+aj!RoMC#{DO0Q3%1 z`8};ZPnM!=*X?J6q1XpdU0=2?7#~8r6z#ke8mwd2lqWgfn2%iJ7Ed4VYo{)dL!~Jv zU7DsNp@C{DmlGzJ*u=i0rv!d;cx|S$%cBAf#4Srx+5%T8nUq>5F`Fwk!(w|!>lAvU zCw$dwX{ydG+D+WFXbC!k8{Gk`?e7@rVd$}`iyo8;HgK1%&WV4s3G-l-+H$-W4 z$}h$5Z|xxlEx1d5=!a9{44}9dTpS!+g%oB!1|uNZDJy&w4ZQUfsafbJ*^rL2%$32i z#bIWY@i%8&32(?!2xLkMd%)anf5aN*T%C2u8sz*ad&nB%oT_)k`penN+!oL2WpEFB18Tq3CbtTkFPV z4$kHd+vX<58|^Xa(%GgQ(Bx;EI=b(WkYboEFv8N**ZGja!89F5oEAyXh z{oPKVg6KWc{}{#2%t>Dry3w4QQrG+WqKZ8D_w26<$S3clqrCB3eBxaFxGi^3A5cmY z78=o!b|Vy~NmJRDVji??oz~tm{pgq&665Bo0l!;%{L|9=B|c`PG-@pOWEB#W)ZBvj z(9*D}VRCUy=iNfRVdQk(N_&`wHD_M_OEhbO&u(tCl!zmyi*&)j!1(+bNy;pFW#j;9 
z7|K_eHAQCdi(w?smxKwZm{WYApa{-h#N9~H3aTR7`=*PDbQ}Ny_mLtRa-pC5LOAvK zUaxSba;~A_RB3S0&%^dK84^}uiE;WGu&heNrLY6di>n}M%Z;8HT=7uGGBQf;td5LO zf?QrS!dU+tXf?93O=hY3StG*8YR>ttbu+{HxHKe2(m7=owQ`v~ync7mZ9G}Pn@H1q zteRiPq7~4vSh2Ldbm^Au0=4!#A`FJ-Zq>!Wg5IO`S7#h?svV$>@&0bZttE3jfcEk` z$1W?5AbbCMGV3Hp)BbfU`fWd>QZ;Pc$D@L{tMn3V_u?a)a0;JR@!V&F!Ys@K1g;oW z)F!&cS`y?(C8TDtsEr;#C8y~Sq8u*AObwVN*RJ)}eZW2j-Akx)6r$UA>YgldBhi{U z6U%+OcnG&y&KIB&S&$p`-HV=^BsB9B#i(^X;`V|^r3zdzd99YFtcsVSBc)~^m$G%0G=BXHWNl~{# zJJ=}KX78?_ZuUpvLRBxv^`AuK(sbH}FM?+;gZh;dqwV0%+80%XW)aOT>BT==!}x=w^aJ7nk};v=C}aF*LZV%-a_) zu7j1l)V1kRm|mf?FF|Gh>mhErDv4oQSdge=(r&Nt11Zr^VCz?nlOG1N!^!6DbdO{( zvz9b2_Q+Av^972)?PDviySV~wN1N+%}^pOan`UsGw6&CTb z#Dp-yOi4j?9*oL;Y*tu1>;@!ipOCnD19_iqIj@!at4 z3!(x(rM)4>SGo)_tK&a6vkpI%wk}g+V7>Xxaj`g_i#`c+@3~06P5Y;_@npSbQs*vL zL8g_mWZfUHNp$af0pxygP`XC#;cAJpLPG-V4vwzBYEX?Kj5aPS2wLJ971xz9TdesD z=s&0+YtUaj1$#A`QlJL!hh9xmn$p(mnH8F!J}de?ja7)dwE7>t2(&_4Zeq8TbP}6Z zD)nl0ievS1*nw^}JE3x46syLn93QH3aumK5(h_uDv%1bXn*AoOpooz7tXV*;L?YU3*4Yt?t(mwll5A(Tc(VRL63Fqx+d4M z#LFrPT08J!0=5NM&76xbLy8LUYYqZCkPy(x{Pb0FZRF2{#e~0-yW(i1&7wlf%Fq`V z02l4^j9N>Jb);6f>JDu^d=I^vl;*~^riTDWtTd;L#WS~Qz3bLd0$w$U+XjO|x#7Wf z;)`nwcPQGO{q%T~S%SvOIL3~KgwB^#(<9Y3VL9W%iYgK;YL0dZJ0wmLmzz5uFsnp$ zvBYF2B=Ir}xM!K8djK7xEup%uA}9&3=67P?R2sp84J10aiO0YqrP9I_Pt%Lmh>y@x zC3vL9E0-rq&CpO~AM|9@VRBX7Q^^m{atBRG-TZm$z@0yHMds|d4+QGid2UM01!I;W z%~oT~KdgAsxohgQmV4)>LmKLf2>sU`Hasf|$8eNl2-zexIj`a$x_Kk5R9NP7_EgG5 zJgxchmCHoT<;YmR)z`M;J$o6<@IcsaI`vFAgP=cg#`E6z~M4kNH3*%>mRnNUb0c;Yz$Q^To_4`-Uy3|I+plR;V{!>;NQb7Q#p zs&p-dk;@@VlE1G_gg5b0su))#8zRD$&$>7Ll&J{T@QUHhglTF+sIR+N0}zbh?^+aG zD=K!9v+nCQ=||!uDzz?`1t9&bvx{bJ{z9q%-X>W4HL@Ym?o5$!XNE8%)mD*(`lST#kS`O)0;O4Pas{hb74ZcpwDc! 
z*O?}QkkoY%B7)2mkTq&=zj8WrPGy}5Lle41(yWWH+N+3U0REWE@3sNlvW0YhvpqTK zGZVrR2=|qp=i-^2Te)qXcwf{S$9HobSMgqZsLv9?UIEEq)o$oye_*KBklZiAXffIj8a?*3B_ zr!24A%_x0XhN6Q|_lBq^^{%A02vVDBvzx9k_$usAap7k7ie^X&W6QIKJV(fJHtTC#2XL;M~Q z@>#67jwlbF4tKl9^%sLOW8i|2UUktv>UWrjd=JbjThpC#6+HCa?fbNbT384qr zV)R+->E~%RG6KVjULHhI4H41ocjs$$jgCurF?_bcmf zXKqbikL;gpa*l#hkP9lq->tT4_hv3_utM(71p5h%bwu>3==a{5NOPjg+9bVs_LBdY zwIJWmOb=;^PT>Bu$Jg0uI`D!^GM}M%9;2S4V-~Z*_M?ok@Phmlc!kDKqQ`7Qu6l6% zfg#jB*tk}pnF~}2OM&c8PR&%~{}t2z8zCfI;$_?NPMsu-_z0jtMJ27KP1?zB;mpV-oI(8w&40Po~c zl{qBN`6pZgqa`_mHJ1@V#}2=7q?Kz?whCu?^swX0uoDjZMvA+|Q6HNeewIvnB7LZs z2ns`rRfwddM9%s<#o+=0oQicSyokDG9}rln)D?|_m3r#onzsL)G-wxDIu4OItbo5M zE%Rk^w|%_Y*QI?RF;unH)qQ#e>_lQ}!d6XT!WSo-<#?H!PRo49M<@Mcfxg5iQl!A) zc-^}m!7^N?j{229w>`XhcP|M2YI^ChWOX%~dob_4vZ_-3z6cS0U^lKjO%6qL!!I_x zy6rZ)rabd?a{5vyZG(D zw=v@YJaRjHvps+)ac_x0#i1WmCADeeYf;`YAb>L+#HNDLtVQ1`&mVB7YM%n=rp769 z*amkh*|ezUzdlDyqOZ9KH5^vIo&9k7lROkPtDfP_f0PfkM$C|*7RoLb~nLk3h0;{@9ArgjNbgMjUdc8DOmzoY) z`Y`e#x;cQCqwL8{d5nqS6ic!3X*jy(%T<1IvZ?Hn2sB*DW#i7st**2LG~>A2Kw2X} z;Me745$;KM5=}=Rs{Jo~JV=joQ z)ub6=J>y9ge5|wEeA)2D4(azHw6P(ncwB$<56oux!)A8=V3_Ku{*CA@wJdY$JI?sZ zrbV8O=^{w(Ulhx$g4Xm*H`^Q?fWs`dkL?KgQSGiH`a7(Gr4sl15sf0N4s9e~^|~yD z99L|#>ZM}otTP?X(da)6u6COXLn~UEi6=pL0+5(dw73DoNDDv*PKvL;>%CL>CV1^~ z=j$s%VoFbu-xm6s6#a12LjUYrNmeemW>;tfD!aNf>Ly3Xq>9Mhyn1Gdw$1!CX&6HJzgnu;q1;~3WjBpcd~52b@#t!Rlx4AEKckh8To$rvlhsestUm8& z4()jLy-uw5B%c@=5-%=)R@C2^2+a18Y7t<9=}V`x)P&Ym2{!Vm*=#!}Q(J7*xVvB7 zB7f=gGieU1!|*UGH=*@W6s0JT=8Mcem9mFSyf{YyyiD`;;xELvtuTLKd4=7*H!#(w zZ-!1s=#Z+GuAH!N)B5zQi-~>9aRgORvo=)SR`}NnCr{`@ho|`U7d#vB?roBC56=^n$aq6j zVBcKZth&H*WC=uH2Olq*Neb33h$QY`zkOA3x(*iW(B@G|oXK>JX5uNZ@&r9h({l-4 z;(u<4SZB3`X8|`MJ_qD^ol{_$^yEoLr-&_}g9uh(4w2N{4GHgCTz}E<#b=Jw($;8A zo@1ww${^9k5&C31(!dbpX2UVN5DBdfgQLQi`#Iyqps?(g)bM-*o)XH0?p(pmZ6!B% z!1tI|#})oDl=B+-l}mJUza5E&v)?_BYg>Ld(0?Cqi2eNTW7Pd$;<~Rddpu8?{(hE5 zZC($Wf7w5)=y&ga1@V1chqxma3p^A`LI)a;+cDzt?}11n zWa5vyS7RZ+4R60!mSb)E_o9(AOnc9Zqwcr*8lOi@{a1xp1-hP(H2=4dI8i^*&cDhz 
zGRgXKS^BRbaew!Rw;<(uBK^;b{2BF*48daqUuwFaiOw&GpHEu+yWZ2edKU90?fDz~7xKRn*w@H_ApcJSTLt1jO)UP~l6cdkajCD0H~~k;rHWA$qVR!a zbQENj=YEcKYN@rNv~Ys$=Pm zE(k8qSwS(Fvp(7>_hDAk`HlRR5+VrhcYufmI^u+)JD|z+!DNQAIbezJ5M+kIJD@W5 znYMma-@(%fV=3k4Z)y8wzy14}syoDf$B{qQes9g=tNb?KGg9_I(lcXsRLM0)_RyRM z)YP^nf7H=6LH6*S#}_7=X&(P(%|h%GF|UN`S=Dwa^%FeZ0&ABTz5KLeO0S6OIoEc% zr4y))Tzr=xz2dYJQ?GQrJoRHtuaItY_wFCHr%53-cjJ&1e+3F7!vOrJJO zMRz zgzNFVXE?Tpcn^2mS03;kS&He`_;0WSOLEdkL?zZ{;;zh5-+=d!#&s$ln5o>;#Ai>^e3 zJgMdpBpBEZ%74Ux|5o8?LjAKOS_~sQ-FwY@ZNs(WoDsgrbUcydw0_ggdreP6Bw_U@ z%5gn@G)o=rh3@RK?a~K;a>&H^r;lPd>Tbn6j?cAheu8iO0JSH}05mZ>3))w_O%#?} zlI(Q8xvUL0cw~UX+UDIvdODl=WTx|>%a(jmLS7oaXNS%2pZu-ZeB{abXF}4iwK` zV#bA8*I0HrXr34cD<|H;6F<4TOe__o#EeMckqpR05kX*P@j9Jb3vJ?T$X6QNZI{eB zfy~>YBnh6ZHejj3c6{JGhZ1tc!r*RLZ6HfuYf*82rArg7f1&J)abt_VVA}1-dxx$M zv|L2&=`J5dV~mZuuAS_Yhqu?C?9(Si8yo_>P40Tj27hUiR#%mqR{mCG!z0g339s*Aekd@x@2NIp zB1KB8SFxwr(! zf=K-E1Rk1`PJFZ@S?6N#(FXmbl;XV)xam$X`&b-5j-`dayyBJA2;4X2bjQvv$pj1{ z4G~7PA?y?=$_s(Gx$gVBn1}XkwLb=3FyGkQCHJBD&0Is z1eW8Qc(%Ip%MmbUsLRHqeaf%(7dUz~=qXHEaq-Tiia zi2wbuK$zj&&fCTdhfif=trj>{w{PSAFti(MCnkJ}LAXF^ISOlP|QtDA2G9RzlTx0mSh%1Taw$g~0s#Mg&Ibs)##(RsUzE*n%sp`R(V^X>IR`I@nQ{u)f{dcr9;6l+3!~i16r_)= zTn#bS%@l!3OLnIl*bxkO`%5*RWuTqrcB`(r1gmn};l0M!+?nKcKOcNV)}!t;b!Iya z_XEt#hAXuu;kO%+eV2-_nl=^uIw31P%dQNSedi9f{KI1lZs;C^AwQ^F=m?JpG68HB1r+2I}Ob^kVp)qKL%J zPG{{Q1ku_+Vpo0Sh2PX1m}n<^i#|mJ4yfl|M)Gpi2J7!h3*9ejRf>S_aAr5P2s*CIu&KS$JVH-%M{R-Ld53j8lK_c zaTF(Xn>yh^=HoWfo0&~pv_|lM9J9kweTZsT!5_c;?2Nb%j`%zmBghx%9-5$E6gurb zSnYwy4LI-y4NY7VV!-upVPAQ6vf0TNWZx|#vjrXU5Jc;Le}}GJ$_e;|+7KabjE!>o z7eWeKZKv|%>S4IU-jAupyn5%}|G%O%5eT_XlF#UD^Z5wNC)M_jqt#5kFYxt^Fo^6#tbV*t}Nheyg>m@-c3+j zVSrXy>{aXqb^bEw6(Kf?YlDepc4J0GX0Rbv`pP6jb@|M3ofY!DCmaM`^y|f+wybY$Vo(7JWr2joAlGOOU-sxW= zKVE0e#eWDodH#o?6ZAizQ{I^DF5OCJK@^v5GQGM)B9uH}cY*bp|g_Q(6?HKiZ{K*71^* zD4bzXDwOCMPq`V~O}QCp3SDXX2q{;AV=VzTf$iy0$aFYg8flTK08n|F%>>hcx3A>^ z!m~zaU;f2`_tyHnKR%%o9rAx0I{m|e{~kK2O<3bgVC1A{X;xA2CED`T5%Q^JeYd?H 
zK3!vts9LU$lr1pfB9fH9xa_9HQ>~P}u(`Kiqau0@_C!;E>lp^dX+v#e% z7Ok22oEF~nyv+rfof`l8O_I;VCH?_FIj`4jgoF*4Mh*Z;~R^c zJ16{})?_mk@6iBb}n0^dl_kri>S3%;X-0J+ccQyiD&H4{i;A!rGa z3oPyt1jLiJUQ*z(Nl;EM$=*f2=c-K8=Ffy4%NHickiSD$_&|Ig_C_h04GH|FszzTF zEuyq~*dC~y;-e;H;bSVxhN8*-AY&#IKuo0WTx$~ z1rYNyf`LQf%$8l3-f@Bcb>3^{oBb6TT1xddok}CAdnfgr>L5CN7qUvv)OaF4^z-95 z{CJMhRokaD@W$J=t|bhWV8e7SgVxv=N9 zn6hU2GhAoV#t-JIm+(2s@)cT5IobuZWkkRBj0z4ikxN65SA;Fe^4?{yIb4iZF?feC*mM3A9BT4$y-_E=5;(O)v0 zM=`9vaG^$+Ar0yW2u9$J0^s(miwiTF4#ch)V7}meTlxOlxTTk2T2uKCJ8N#yWqPXS z>1(zrIPtid=3ZUXn14NQ`|Np@dYo4sNvy3R^s&H>O72fEXbG4jxS?(}LOb(pIqfM6 zh@TCsv1XKdi=Fi8N}N`wfRQulFYmZqO$&Qw)qpuw_1%-o&T$4wuCmNqTk{FD=dk@$v-Fl&%hE-#{V2F@j>|~EHU6D$5!@Z z39~n<5A?4UfpAwd(#+S>w9=FEONjSZG;)$F4|MiZ6!udzk~H%pLRM)n#TnUQo^P0D z3+LiG3=2O5QgFw}Jb+?clMtexoqDSSC2KQI(@$}0kFd`uu_(=JRW$&Ezth75T(MIQ zQ%nwls<*u$sC1xOroL8k=2%9mvQ}mvXHdmcK>#(8{_jDR{xd5zGV%fbKZHuW{%=%b_?V||`uVtez=M5(c=-J7 z|ICB^|4HbC;dp4=-;OEaeBu!)?YrTAP&n5#{U}2~q`3%qP5LQc(JAoEs&6!ziK=t+0^1r`4IDjwWL<=wL7 z-O5j|LqD+Fe^U{J$3Rn%_RRSKNgg0L5d?R;@2*ELdNgQ!eFiu zSq1d}hD!e7Kt9GZ*m!6#u-eZ|-~T2f_74aCPgL^%;=l9DK4m_0^s84N>k(~pXs|CT6l7NMp~b>(>8z=~W$K4o z2?kT1FbcyIa9X-_{C3%Xsh-WUn+>vePk{1qXZ1KP?vZ{^U)yM;&~qp}Y+JDAx~6|t ze|`dJZ-1-dIrM*>$FRml@8{RAQMuR!w(HU@V!<`FGb3Eq()FD~4sQz!#dq@Re_Twa z0VWNrm2+w;^=Oju+%7V8nJ~OTiOh}J>b@9%P+pLGS{Gd?K)44u+aBS@nFV8tdlnA- zgDk_25qn~>^ddK-V1v1xRp8X*MghRYl6QUCcn_0z z*jrj{ds*fj+GPt%Bwy+bCNY1-HdU)Iv&BTatl6FZtOuw&$r9dz3HW~TeEln}gBQ^3CzC0}O zuC5XDX!|vG+z=8zvHXP+Y>_MsE>9E)aT zaxJZ}k%zUAN5&bRr}P6nB7q`QP6;+6kU+LhBnbd(MU^`}A8v_8FU%05C5fWJTPI_s z`F)N`Qro;wyo$>$p>h1TdkdQSBiaY(X3uxzY8{4XMfhuVf~tYi*Oq9Ep9%dtsR}S~ zuK`LPU$1H6&2e0~g$+mkZ0E@tmq;$7HqgnOOQ2n&@F(6qXK{%%fq9>N95GK|OBH?Z z{$#JzZS}kTE+S>`Vjl7{{fDX-7wJvgmCBMu2BnOPOnx+3>C&o>$@78qE7T1mQ^H%5 z^9>ToFzG9}MJ{)jy)~c&tBPcP#3z#!lcbVZ$}yg7} zEejmw8Z73<-h7SS2r?*QKE0#;WZ=`$f7R$NP*q zkMlzN3I4t0f8!EAi^B3C`4$){LWBZN)Gh+^el#Omguu#fS)WjlHIzB%jmfzk*mt@uJM6{t zKdL*+xT?0Mfgif08>BlWrKLNhyTJpJN=i#8t#nF=w19+2HzHk9igbg3gmk>;sP|sr 
zy^jy~!k0&X8~Ng%|CyPyX7;Q#v*nz6*1{$x??8JajmEGU>*Kf^!xup@hLCPH3j5Ld zg`qjNZ^E_d-JfqlZ*y_`tc+DmbZpV=X!3=m;iiBfTA<;)_lPLDssG$)P1rpcc$Y)c zs6wXoy9c%?Q=LR4dr0e<&O<)8WNdoMVY;*&7$qoXpFnH23zYR!mo4qq&@AUy2#$?V zFt_0p6luHN4AH;0ziu77qpV&rK^J@<2z6sD{6I6VJ3bhLWtS{X$b!L2gS2RH%@SSK zIy5^qKh`-!aC~q6)(9S|1N7ILJ30Ifl~{tH_N#t_$1*)#qHH*o$*vn6ZvYIH8wMST zcaHNCwrbmsAB?V)0B&zhp+sM9(1xf*we)=993Ag1+gXO(iNZFZ5x})?X5Kf zTwEdkP)L#3z!7?HHTRXQ+##exxuRh4{9{IaF6LZLrs2}$aaq0mqId5+v3G|k=(&p^ z$*Pq2UFwO)*Sos*pl}dv=)HO$j+IS(QB&0PUw7_to}nOVsP%W6uO>@6HCoHYd<;vx zOkCU~nYn(djJPnYdB0Y-+-f-Lw1Ii+AWu-emeBZ)W!|vCtuOmdu5Ut-xr;uaAyij? z`;IPBo-c%l@h_HPo%&ZUZ2AjvOkQ9h5;F&~0- z8j&&WQ(}mRijNj8$wU}*FqBjwh|HJ}FUf&!Wuy(TTmsEk*$Bx1D%~J>mQ~i#6nGC)Ces z6?WwDY>iz85@L)G$~KH-M@9PXOUD-}kJ19GgudG)Xh#JHs(>+rW4HXq(2H~Y}wq=>~;`Z3)C^OXeM+{>2Ip6G@R@tq18=xlT!RdgvS_v6RpvqKdaCJDE8-IRl-{fM&K|vl^#%?4v`} z2c8aoJUH^vrvuL1r$9E}UjRO=DLYmNRo$vr*puJfuE$FnqW~JPY)8Z-wOT&0av|5d zof)KM9bVwW*@2d>$|AzBhwnLjxwyR#Pleyt zsP>qtK%^3f1Sp{p5^`!2IJHWZ=gZo!WotB^FBk9)d(;sRR%t!|{4i0aX@Omqr191y zZ#7c>*r|}w`bDP11tR$@7>Gb=IX$90AVuu3{ZnfDhC#cged?(Lb zP`g(<;jx-iR$>*EtqS=~WmuPP@%`(87gSB&sWbK31$3`93a~f|vbRVXWC|8tg?W1k zGC@T*x#U`g3KgE)3G|r@rNx_qV)4h_l_yHepu_x?gc$@3!h_@b9UW*8_@ql);k*1^ zyYX>6L|`)0Plz%<^|+ZLzIXi6tT4V=p!W;O;Q$zDFq6(#fUTJYNCpf^UqQCYEdj=krlCG zhEm1IKN?!@dC!gIgMk-D^cqtn2rc3JOCACQ|nyO9ME zPLW%RyiXWgoI;W``!pw3QW7Fn@~&X(%g7Y^d5;of&-MVX@>SSbjgUu_aqE!DuCMP$ zOhoMPc`zVrRVa|9(Z<3&#g@~$)+A55a31uoZT<(6OF9_`#sv?dMmF!HYa`%BT9pq!O(~j+EM8`3H471hmX5HC zjN^7KSW`3C4(|`_Z>xS(tFZ_Wsrt_ffF+BjmDWKdq2)d_?5fhYaA3<@E#3!tUcQE89?HzZ^2Z<4w?x zoPkfINR(u71ARdYX)wiI$Yc|VHc`0rt|sOM1;P>XJv0U#CnQC_a%KTm;O{dF4S6d< z@M)+dz4gxu%guDs(vh%bQRES=fqL{BLDLu?WYS|QEuf{OICb3gw5+Q8)+%LErD6aO z0$2#$@CbgB%^BDtLD}W?rPD&?8W~Xx^gf~kn?R=zTEl`%FQxD;1!lNxYxn1C%GN^{ zyx9Hy6^3Ocy10{d3)P47T=QRi)FTo;TnPR=|MDf_;QHDPo8(ZvQ~WO99Zv(k@5T4t z@S5GwxaF*pwfJ&)TqS&B=u?k}Ao1aLa#(swk9+F_IGAAfL6(k=(xAC|MjI$(|Db1T zWwNk3UuOix?E1zu*;yJ?0~R$K3VGBU?7mp6Mnu9jR`TghCDFel#I@jgy&YsT6x!{& 
zWb;6wMmeWoW{q3K8)s#ZjjUWcfuFi*%t<{s7I`#Bn-k&dAhg#P`_*=xhq{DL8B(s( zotz(Y^&p);7swS>n+f33>3k2)YzU-xi>IwI*8-Z~WoKN@%;~2gk?Avj(uHGrkUM35 z+N6WisWox5e`v{QMuZjIQx#(!wQooBhOZz+c(g?0l{dnyIK~0LvlrH_>ZEV_1{;b1oyH!~WX9z8Pkq8=r>lO1Bj25lXigtZ zmuhl2AnhKM9J78-g@%!`CI+%&;V-Gq-U_aC7a zzHm+_HZ+~aw~K3+ks53aH#az~9jP~TH$PQrqJT>#7T~_^36Ih{hdJ9P2zq~&{+>Qt zet#mvJEJ^2MJ4;e2&9s9I0$`zNxqK@cvUx3y$|(M8DF%@1$Aah(ffYnNQzgG@HLfW z?(Bj<#iMIoLD)uY+(2dV3Kb{YvAA{1woAF0U5UkZE#0*)*<6fc)dx>H0U8}f&0XH} z9sXzIFlsy%LUOU82gGl_8KBWYt+8f_b;IV7$u??4OKo}DXHBdrH)s87I z0w?$zR6AS6=LLBV)FuQK5#nw88TQQa8z&5SS)-ecv5b?``48#!XY{?>W2E=D7*eSz z7^TY)N;)W%U0z43)_hWOEsIoL2|%kIlK$*m8u|7VA~u;@Gp-FJ)WdGeN9oD^*W>WjbBl_ z`F|p6$HlRgyw4(7#x^VV&7OU$N~_oTPY&WtIecKozYuiH40G(?Ur#xJ?V6)D4c^5w z%o>ez@{c#^?o}vm%%KIbdW+verEYCTi4HOBlkMIk}8Cx z--Dya^8uQHssa=MP*q@hKB5zJFoz&Jv0T{3$71>m%Ajdz8Xc7&KK&^Iecg>N#q6yL zppf}I&cI`tJimagHlgo_#NcB%?Z8R9ud|RB)VH%@YPJcA*)CVF27uM~`0%pA$to0Fzr)ErR*kOr9_f3Z*)Q@Ix7qDIeAySS`ouR{hv?wb!&Fsr{m; z(Vxd!I`cwhX9eTn26DdbQODl9ErzhxrTwpRMh%s8iXR3TGGrtw z;`%;K1rZsy>(=I~1T!R)9ZT1kZ%lKs2xBWGzCywYt0?z%*O{y3_=0QGQlP4@*~%nr zf5#n4I@(?<c>JHg|t4!JqE0R?lgoF&Dzkn2;44&$j`+i&Bb1QtRKOrq{PQWuD4!XzpsSqy0+-@jD)H| zQ%!AFX!a^9p;FbF)9* z>&!%un!t6GgKNSnL`)wnP`-(o_aW0;d_z-lu^eG&H_}7&Va>}cY8dWbb2wfrIl zG7Wf=cJXHjK|&=6@cJ!_so)-ONe|M5HuCF=6}y;NL^HK@fSd z8+BvdaSCom?Wk1tHuG!x2hH|v!s+S0k9ydfF?V04!c0~p%OOqL!-uCoHoYxAf@Sit zn&e$bgAOUTf;FU$AZ#vnonaahYWX*GTu)(j-`n#=qs|fOpXgX04&>MB0Mthn_?*(V zN1UJPp))aV=@hQB_u{9=%iC5l9)5c|xrOd+J(L#YLn7rLNaRfO_-&EF76v4v>DLBm zLEyUM!;h39!|o1GU_Q)zxa;8wxz?-ci9vN#IHsSiX`>|!Upp+GUmrtn%;7)BuTXRFc7C_#;iD7}l6|AqXh3MCG0ArEO8LY1@1LN0~iHdy#BFB%QXXplY z2q3r97h(KqLJdq;kK#JR>4{rx_PP&Y6imB6dxOEPUMQlWV`Hd>D{;+7WDSI-$Nt9Gre zus*LU^wZffj(RMTn#~?M?2g^7I~;SK7|Su(u36eztpu9i>M-jpbu6*wh*6bhP2xdvFP;C0wt=eJ2$vHAgY@V8AXxZN#V$RSoZio zSUZ0Ycg=V{;PSa@p}X(7re~>t!{R|Z4p}9xr%N=$vn(T@kBwd7 z@w5`q4QUVgd3{{SsAV>a#O&PfS{U2W3mktL_c%no`2sYBM-Ip~i8~)&dzc{ZB9}bW zbuh{hlbLTmlu$E}%a@LeOg7ol-(Y^g+!BD`V6-Xj!2f(}%btyyv9rxw_^G%mBW0uM 
zuAY{zpAadI9@+ZC)f`mPf}Bd?*Pno+xt8u-pEQRbC*u=xuS|l83Hdz@A za!vzzB5Vj=7f_8n6W@~wz+OTu;SRKnbHw0VIm(mDqqvnsizD!gw1YCeP`uGE))1zk z+?LXmdfeYcOS3(OUoGdjQ74(Hy*O?&bPtl#PZ`qaO(i@)om(-}OAY;iN}No15#P<1 zw3sSVZZFD`JwyeX1bH$`1&T)75vRMRblR zHz~iDl985)`;n^o-htkP@K_(FeA?J6jombnRpyC*DVLCo<7%2csJtsmJJXDk!NO)TY%(hHmI!3Kxd{Ls#&Lumg zTkg?79F;5@yTZH#yxUZD{G>=wfW=bzYrJs9=&6_&@T(IUIS($xTn~QvwoUhFSw-Y0 z-J&qQm@b&aP!6Wn^ai9AI?_nlPaKc1UiFC{+t7B45 zWzZUicI^pZ=LLES)%cOi$oo$>9o78G2QvIA??xcQRhQ|z)Vl5+pib>tlEjvsK%EcO zPNo4}*A%MRuQOC*{7*gZYvQqki}Wl1vPdubNH~S}LZ}gxaKLQ3p2vNTYYx>}e|N%F zw(agy&?UjzBKz%q~ zi0n&0tCchnRxT@(@ zE%NdE5oIE6DP-jhK!I$kH-2@?)GL@Tz6DDek1_{1b{ZTjOGA;eUnBYmCG6iYPw-Qx zlJ>#K6Z3AH>!r_h3m^}-3hdWbp##ixpP-AOa=^B7g(;{8B(o!r$%r;AMoSO{R|SrA z;N{Pnh_~|^1+L*N8bdP_VlYf<7=P#YWZk_VtDT=1ED$6Fty6(_^1l8lrek!>f!fFW zk%eJcJRY zYRVhR4us)hwuTDAf$Ktm{t^}wY*=-F|HFOd{q&Z%Rqqv76AKaAZbF_f+f4h!(eU*-&GQw2B77fE=u1ggW|G zk#rDy#3s9G*#CdQ)wh;ilz9a`Dw0i=(X;!i?bi6N(9ty!;*{Ichr6Vek4}ibJ1n&uMos z(z*W)YuGsN)^|4r3B~cEc+5Azdo-A##2CxZfUWuf0?)$?RTgbI%kp_f1iSUgs6c_0 zSMy&?hDvsvD=i{AJS}&%wSC}89hT~R{DUI0{D}$ z_xehz^_#NMFDIxCc98B0gnvODx?eHs*0S zf;&KAvko+_TR z8AiFPw)hz}JWXH_pm&HK@s=>nQcxnoiNax(MylT?q~MrR`m=m*X$rYMU*rR6C1d@E z3ZAa+yo!=P`+Z8nbLejAR=?$77w;TyDrPL?s39L|5-6wS`65u5PTIwP4340^&8^=) z$?yLx6(E9C2LMojPS5@L<<}3ah?!Lo4W-D70%O84F0yWrxuM15!;H>ngaPZN+ zv%>lRop1*`BO7}wTeDlgv)*~BWX-1>3?R|qOa6*d{}9i_#>vU-0OI%)1MEl_KD^+t z6)yZA{{ti(${@!7@{|$?Qy@Qd%y`T01d&u9kV*T~$!7jQRq8nTV z>e5;VT_RVI_(LueXt&bgcOD8pyxgs=8=)D|4uA+zOlXsd4F0a60o0eVcv29aT$H@@ z=3VhX+H~hpH3gF)a`!$=j?3!%94e}Km!Qy`Tf90{9G2C%p&piH1tWIHBSD*|C)Na0TQI?9{hcljHs#SKfJ z7RtYFe9NJ*^F17m=40;h z3iv8WKe|MRZR>D_{XtTBX)YEhckgH(b30;A-m&1 z8%9_2;=50ez0-A8tvs`j1xGRL*)W4ug1wGUjt_sVZ1DHE?{UfEoN-j53yTZ&e{&n~ zP^iBaFByu3Mz);~vNT}Hsg5JGfLz|8Ljuz(f;kb3?$O#G#3rVF;D zJSTz|v?%;`%-5gG4M@`8E;V3D;Dr?)0G8B6Y53j{bkB0ob?Z+_06@BT+$ANMS=kyJ zSuug~Jo?VF>%L6!BBMfvGby(DbJg4FA(Q`xgZg%1)al$Q0I~>pX=;)Rie7 z{rEOqf&wC*jqnIW@D}8P=PB++UzNfJG_De~>Rf`tbG0ml3q-;9|8(&_=86QgfZvp4YjRm2i-y&0F_2y4LDzX8v$g93dSK*oSs=^8 
z;UqNBID;_#i$E;v*9c?_ssvxoWVdPWD|FES0I8rg{=A0X?_3{{!~IJ;^7;3;o%JDH z(14SF5pVunX3zSN!+p@RfxJNDKCT1k&)0ul4(R-#;Ilv@oYx6-X_K9w zT6-4g3)hu^e$BAGoLbJ0emzS;#B*heOMjov&j&n9;VF1Uip#IV=V#%ag}@cQ62z~` zc$YKC`GIn0DKtf{OmS(0oS%$zmI7PqiWHYK2sm{;KL+S5M85QuAbt%60vY7LWUye0 z^Rsl$Qmn~bnc}BGE0FWG@&-BI>vtAnS@%j1zdHXeXOQy^b7v_S^sh{DX@i_^Z8=K; zXL3b~%Nyi;8^>9QRMRU#{A%jBoI%caL!70UxOZiWOB>|;IpMPuvX)n*xV%BmpNTyS zF$)?-{O|M7d%sS|Ud|xrPnMpgptQL%#ib2${xsNGiXFQvQe563=TAPJg?R38C5T_= zpe|>S^GArzQgk|Anc~t0Ie*UJECrM66)7%nkn?-$XCYeLt^jdvhy8a(`TKgf=($1& zs>c;b&TdhIK>lIPl>bi~A<*l9{_(p056k=K_omP4qWB8Z&+b(J wcSpUr6-oW47Qof@CGY>Dg)>{9AX$IAp$G%2&;tNGpuf8}0RU+qP@xp?e<|46qW}N^ literal 0 HcmV?d00001 diff --git a/core/src/test/resources/indices/bwc/repo-2.3.5.zip b/core/src/test/resources/indices/bwc/repo-2.3.5.zip new file mode 100644 index 0000000000000000000000000000000000000000..73b27dc83ce6249d969226b6b452c3bf4c17e0d6 GIT binary patch literal 104147 zcmbTd1CS=cwk_PYZQHi(X?^YPY1_7Kd~MsdHEm4Ww!5dznSaiS`{KQOYo?H{{!2~#LVUYqKN+|4DSDg`DalX*_hawIR76Z@&6Sh%fAKv z2c5D=n&4f_2%%fF)cI^**nY) zVq1Ck`YZTP0rQ8SpR4Pv3z1*`&zF|pH>oTXeb2v|EDSX0?(4m6?dyH7rwnFx4P4X& z&UgRpW&VU1IQ{;60y`%^b3^!-_To=X1DQ7f^2nddKkC!Ih3DN>EsTlDi%nB@JRyTa za@w%ah)%TI;V`Y*YIYP$(`z>AUF~zvPRU_$?rvJ}2Nh>BRz7cuapM&+lLhA+kkct` z?TAn9%{yAA*JpG-?bO@G&UYQOC+S#=7F9tKxikFsixU+@oN>LR%Z7#~SFcEN=D_vw zW27-CKT)<+g%MDrXkO5y8K}4meBqENt^ve@=;?J-6|}D%S5x_f3{Y%yOl zP1_lusF77Cuz96=b#L{?JY{fjG@Rqm$|JVcWeuXDX2P zMuhW#4M&J$cnioji_v;?*MWXN%%oZeoACQtS;|d*6}E5XnO!uMU#DzQL=id{^B93U z4uIN9x6(j@{H%)9CKvnE<)X(D`hNE zzi4W8WS(syBSdC*S zuO=f&<4vS<9Q!=j<18&_ibm!<&QEq;Lx(MK0ZsjzV2ORAWeW$Qz1BLwcma`)hd^$% zjYyApo17+qw_*}1@%E7+veju>pl4T&?eFhEdX{QHUTsPmGY3n{N(h9`losp!qgeXa%%z6+nWhocSdpW#) zLdDs7P;DbtI~_4gBNc+Ctbg(1{bPsEn)aPs(_})E^-AtZ*6*5EjiX6?y_q|U3dY}!iruB=FrVl z3dR_#2ys55#NwNLuavcvG9fpNi9w)80yUxl)2sCeL531&4>z&atOqvrp*dcrDbl*8 zq2^YkxW0q@Wp1OIB{XQDXtgofw*9o>LIo0@9a=4=T~r@447!w@4AFA98O7+xfg0an zqLdyMaJ6^Gvm~=x??{@;A?PV#tu}>WPE?4fd)EH2GAny|X_Ci{tsu{an9n 
zD2t9fF826o%Ih_X;QdAQzRJZ?)g)Ce+sQOb)eGY}ZA5BB0t3h8R_v)5qUQJzO$RLE zO}Qyyl)0)hUY@L$WCzpZ%54bhfml=@V{;od?KqZuFA*w$sV^6{(S|C_F02;ppTRRF7%`VZ41)P0*gmq_U-k*={3H zO#hn-vL5}-OK3o=H5F>)arE6ZwKaXKiAA~XWshLf%S4^HS7-Ryn?NV5{a5_1s$O#I zdW}JYURk_BKF5%I{eHL-s7mc*t^7EfAyE+b`U3sg~J;!P;XR9ianJHhfHQ>dk8cJvEkJ zs-ta7z@WpHc?cmUIF5vd%uoObckQyU0WN>ivdlBRz*Ag^alX}ncF&xZHR8FY+t}(> znS4_#O=}NcM!>!dtDS!hI;x@!zv(Er4+#Mc6ritF>Lh<9EF%m8_9oECo5zG#R-&&g zXIytJG3l(XG?H55YC3lI^FIw}Q(BnVnVn=fVWm4~tz5d#8QgVD5b$Y0+&3GRD2?FcpXRi;z$>a&xBf2I>G+d`s)n|l6Op;TS|$=mv-Gwv(A zS_2-f_0H{uQa?IcGYCB!a{{bwd?^?As&dqp)h}AI3Eux$EVbakeIn4rE_7FIE1t9p zYqOqY5x3?;=c#YhSsPfK3u|sFB@Ebd-1e$2nZ!|zBV?D==DJOM>f?*HR%czxKUA+2 z^Rf}ZSF04WP$FYxY-;Gjd-XP)=Y??CaqgdSE>M2vttVVw8Q|nfi{tIQYW@}8JHi`$ zoGR7cR+OrGq%u#fIEKKEWN*ZPW=bKC;DzTrlom;Kb~4|lVaP^+3!K&w8*|f?UYx|m z2hg>bM6ZRd%4Xl0itga0)-r9#Hb+IOUH0wxt5Feb;+4Uhi_$cNQQ!5l1tJ*3KeQ{m zRafr=a~~Ub=*JVJYjkeb1R?!xa?7$OtGZ*VnU3vFsWzxof*4fv*&Bxw6GwGyUwbL2 zzNx;7UXIOiNIkkxk;CW6cnR5b4Dx^=#ENn@LjKoh=4(r%2hKCF&r7!qGB>a)$bXAkRPv^zf^ zG#ADajPz4n;^tjgT)%Id`C8GL!gqI@Quo<>YRVPEUY}O#J_Jb@L-H{9#qZwDc&eqG zJ()tNLO~!^5<#Dvy*J&>N~Ly5z97 z@C2Lu4dL8-p{BJxlyl0%-eB8pnBG1$kA}rLv8Q*KWy?>s=zpMMP zapIs-8|w?+_+(J+a2+>axaW}%6!6upqtA^0t!DF9>3 zcHz7H4hZq#Jb18HZy#)x+|1kX;EsS0OW*K3q}^ zg(1~COjcGpf9s3lWSIa?-6joQOw(#`XsAZDHx>me?ZVS7{YZo~9sXordy^A5I?Y*%}X>j>W#OUgr5SeYtP6Sh3^T zmQNFcRir{A^}9epS7h74VF>!|-0E}r#zrj9Na0saZH?w*DI)rhgM_MdB@~Hm|MK%z(Z@t(cCZ?gj3tC$=H(!)qeaQ zWn7DwT9pr?+yeZIESUyUe!I?E0(d zj=t!uskRU;4EV44E^*UuhY)SiG=IUrsINsWdH% zWjYZD<24{J*ymRxOKX-wbS{TNG(dp9f{nqcAA#Q(>a;DkSED7k25#eNdaskR#Vf~m z@T#p4q1Kb#X;|Hi>=PBH`I>BA0sV_B~5bS6~3yI#zln^vr z2psTXSE&__t3uCi;;#lLY9C?EV8<)(37jvTBHe=2KBN+f!mha2oV32*m}tt)g{}TJ z_9gmt46j7lpOg9=7sn-;YU|s4ddFX&_Tp??(<>cpv|hl@lT}bxW0lc{<6#SFg8+f= zAf-Ve_dt6xDR=S7wYQ7=tf9}VTYZDyYgW}p24+}Ps905Uu0ShT@FfFdDfNKn-0z|G z#!ppv!e2F!S;XyFy>a#}7R`pi#Pr$L7DvO~QP<6N;GT<}Em7OCzC=x=zNLFHgsQ`| z4Ph(m1pq$T(`T_}^k$EgeFAM_L@Jdq94n6527l7VArJ~v_j7nVc2^_Ug8G3gv8Hu} 
zcYCfBk|&5_Z9~Y0p83}vXLrU)F8lAjsBaUxy{GgKSjDU59*^T%rPkfLNPe1)xyt!& z*l2aDW%9X~dR!B+9}I5xJIkZ%I@-zSA$Wq2m@%}tfn!L^LylY&VA)MRsr)m1_618# z)nRcJ7s#K>gRLt5I2z#}N7k}6YaMyjx*0XSeOZmO<79HB#>0t(D z8?Jli0opVS;R0Y*>h>u2*K~QUlN5}t!!(|~&5(+$)*P3#-=f!qZ95ger)bw*4YP!I zzx&-K*Lji8jE_o{RlTZw-<}E1^Ob8CWQG~cq_fh7)>jWT_N?FOydu+BY0-Lk+}I_5 z8}v7Ai)h5~G_NwH^;MCeD3<4s&byFvfK0x=LdbZV;~&6ZPV8J~0cCxMJ$N)U`%d2m zor%ybS0`UHW9hE*mseL)hxW55s*pBasK(t$uxe*7=o816#H}|x9B*n&*Sk8W`49hc z*D3>Q3%%1(PV{bc;=&w8#}e#!%mo3jj7sU5D;*ZqQha?fo5!5=lUUuTo3F!8bK&3kS2b!Ry$F3q)QjMGLz0<((G)wh zHM9YiSx>Eo(6_*+O6OBTb&I2khqvzEm7VWGCA)Qb)syFQ++vw|i>LZw zIz+6?hLX#`Utzw-8}?a;;+L zZo~rg=(2j)qwjs1|My*(2VxoiqYvceCgSf^;g@On+t;OsU1Q;|8pOAukKOm;)9=5i z1K!JMn%+hod{ca;0{&d{kKKOsyxaBvw%2&!U3^)n59nTbFZvAr6<7QM=kVEbO&nlJ z`{hUYyVdrW@b3e=!d>spE9~7j<5!=9^1OAymlXP}Kjt@n4j%)1jKuHf&x*T0{U5o7 zfBC&V1ekruoo)BuZ%OZ7`Eh=Ix=cs>xx&4wUV`qIGQ}%6eR=x+CJQVVg8h8z_iuki zCjQ;{ZX)cz?c@K>dZz2}RXTo&>ELyJ+V|O1@B57T{ara;nXdnLdcbE`f`q?B&mXmX z1>kq3-0$yU34e~pb|IDeqXRBW1DFg>jlkoB-|G8biLS1RU(eeEdcQKceSiE6c(UnF z@jgGsJPz0v4)7Vd?Ym>H_riKT;78wPW{rROa}WJpvBm@N7T}Na)%y}%|1s;4fAMzu z+Wq_dw14Dhc>kaHKal^BM*p`~7>84IMf_Khwf$@VRx4!vS6X3ne;zs0m0DI#R=Tw{ zN={Z@c9!NrW_sGsfq{WnKb0MOuO!CI%{-XvXY4g!6q?ZixDi7xa%?q!)(8jVrr>}E zF$fP0W9=dXZEFKL|D?nK6=P?ms$ds?6;Xc`V_9>5B4qWh3Y_tMmZj!7_DF8-lZeQZ zAqt*2g{SE_w-kifSLcDcVA+POi_8n$hEwcIN-Ro?27p$^NOopqpc{7TNvi3|5a51b z8Y**0Bj>vgFlRC=O-(0f5ON6-@WbA+z{0Y^Via~kTQ507cfiKd*xu08*3{D6(8_*b znkZkjGCfCQ8mcIUy>Lri%FEL*^$5GPti!DAqy(e;82gwi?dTfv5&9Qu-B72ucPMd1A=CtaXnuT3nvv15Fb@1opDJ-ki}TB296N} z?;)d7m4A6DyY~Ar6dzsOqAbsdoP}wBPQlfB0K&exrXT20;@w66Xp(*#Ke@95Y4W4Wdv<>dcj6tTu z0d1j0rpkaS)NcDHKUIOE3&xlKlOJIzSM=fkn4f>&FaL9X%5glk9`7erasDzRr~5EA zJRQkBCqMBmJlaC+Tma)Od0MaH)eUCEG=l7Vq82Qw1y)$qL)cVy;B>_L&m4$UWRaAJ zte-uwV(iwA1OO0JbIwrf1E@C)YqV@cbv1~&5`m08jEX2-kAb;-?8HcDqH{UH_TxF!H{M9S3}k;>8H{V z3&9vPEXkiGkSJ*$??NkXC{H#OQdU&fOce)tefZfqcPs86r+0|Hka5^(hq|737lY6& zzbZ@+jQ7%86#I8~Zx_FBe!a{qy-_&QlvA~wC92#IKLpKjF#dWNFJGQ){Xdc8kkHE>`LXo_bO17*m~ek|E;$&QnQ 
zNCkb-Z-c0+^vfx*677j zw8w>`tv^!-e@Q-f%0X112CJjW$oPH*l9ByV2$LYkBr1X-(j`J60RY0_93;VDcP4Lk zYF(nPDUc;6^V}`m-960REzB((uF8k@pIuU8HI~Ac7vDdF4QR~|t;i8A-%ib)dfUG= zeqZkPT(alYa}D6+Zj+mTXL(Fc%N@4~v-Kj==PHZVwo9rD=A1$)QD+gOf5r7*{qu`> zfG+!z*^M>N?hsP&VboT3(Y$rRSKCpHpSLsg3OJdz9|hm+nguQrOH4*(0uAU=OSsUf zmhaU}xTxb__3I~E?$%D&aLQI2K4q6D9%A0(>w&M`{$np3y^?yF-;heUf)!Eb=8bmB zNuFzGiU^jMajdWHsBOB9#lIU?bBcC&i77pX$OV`_KkjOCmaS|upf%%YTRZpz!J|65 z5DbM%5iH@ItP}MhcVR~Uy+wXkS8AKUszo!-RpeM|!@ zF>6o>-5Wzu9TkVf$C4xSg!4s~2HoisS=lMTtZ&!vy0X357FeMUcBi(gM|(C>y_7>a z98_>Ppi@U%tyzb`QCIOmLN8Z$<}J!MhgY@`7OarhN8hQ&%auh}cG}u>FD#MyHQV)D z<>IeQ+J#?=M=!d(V=y&^0*Xs={*d${5b!eaBio<7=aXD0Km_nb?fctWSy_ii2Da>@ zH7Gz(JV-PZEhF>1K&8sk!RM$ZkC(`o6)o(LIV5in!7>kuIwE9ofEN>B?*X_KE}kT$ZS$5Ey!dahjj9O1zTM#D)YtVS1$MoA9DS%7Fe z?!<9GT&CgGE_Y=qPWeh%m+%>8%GBT#Lr!bW#VH-hwm3eqJ zCZmfTPQnlcQaqHG2^MkQoQJ1khy@N>nMa0fUKCT3T$-ffPi4BaG-!eJH{tQZlzh4q zzh-0q$;g0?Yp5g5SWk+KTd+LuYOCbkkMEm9`QW;*@I>h80Dw*d z${QxUiZ9l|B-;;Mm*I9EkqCu3hhTY-L<%UA8rg;u2AdNBN9l9j(s$!JC-Aj=fIM)$shr{gkE)c2c2|bUnY0hV@B6dnj<38;RON}s1)_aZi|5PQ zMhBec6}#KJ@BoSD(g3Ar|Nd;EcLO6tS=-ER&rx6;i5)k?9bui%5!~)v6-cY z7{jAW@v*33+KK4uiS18?nDXV#l^v(EyQJA-{cXW9?@E^eKMkMNx}y+HZVI0OH*vo& z;s5Sk;gOyvZ2ra$c7N^PhO`|2%DetEw7UFXA+28WcVN~*T4q*?>Q$PS-rv|ZS!eM7 zC%ElN2$BUB1|^}=n7J85h7{6{ODP10jL}&v0Ll5gF5tE#N`Bzz=wu*UD)w*3sQ#|(Sd%&0)2*_c~e^3R^e^rI+-Tk36ofmzNI#Skllticl1>Skh1&lUUMG z9M(}BR9eYspsF0Lq#SK2P<2vklI0&MGn~IhGpsB}ILQ<1MiAkI6XB#c=0v#aq(A0F z@O{;daCOy9f9&b6Jw#n!eHff}&AR1Yemc#%?a0^Q3=Kc{_U9A7oc86HY!#|8pCKZ1 zYXbVUu62LOh%eH&GOuu?_UL`cJx$@`SN)C_viJKu5OO&{whnkr$nt}P?GC{ENH3<> zXc)mV$CStvkV37FRU4^Br-x@=VH%!WJi*R6O~MICSNFCx}gw z`4!fP7+I0*Eb&=npEq1ohUiP!Wo2+W+%Tqar=~{DX1KM~S~`{| z;S)T=$zjer$!cdv}EzTFa~j90^&^D3lY@kV*&y}W>_ zmGfjvf@Ukm_Lwzi6m&n@Gx64#{cKD!C(Hvo>p9Y!RrrN~j*-j$B4`26-u5_?uo>@Ns}T291hzXorry`3mI-Rv2zkQx`U zwxZE9;e;65=vxf&px|T^@G*hW4%$rFLS>QGbFUaosND 
z=%zLsTTnuvNo#kLTWnLgzQ{K19GK!qMjOD4ep2l;Z-l~cEUlZ^xmQh%2HdoO|?R?&nii7Puv{m2OM=KF<$#7SNR(LzON_2n;gDZ@*m;In@9|0(FvKoq-w^+r$3g#F=~Uo#-um!j9RAQTKJAo{ElyKe8~7X=!Jlm4*g2>It69) z^jtJ2N^i!!rmZ2!=t4Y-l!0}INBJyXbI0&-yd{`|B)fs_9YrlTaXI|IIs zX-3y>g4N}uXSS;SC8&|EOD7zO&og_H>4(FgQ52If?tIxJIK5~%|9sb`&me8Ve)7at z4mgrqRnjmYM_gK!-WV!PGT4$NaR6Rat#Ae#V5Ln}S2~N5>~A2yz&h?y9KzeCWyv;a z9Y?UhQ-3J*qz2w}C4% zwa*tdXO{ZACo69W#*AffPqEAWa!dD|8q;FM{7ItkjhFU3B&D()Vp-pkO<6gE-{wuw z8$7JXHrX_(m*>wlPTd&cLGB!iwZIpKs~=3aY2quzyN`|{rE9kVt}r$p)imqQ}qOi%_xC}A21-Llv*n`5o^ zgv1s4j8s;|c#bJoA?IF{{yoSQKh-2ML!Qsd2~JnzSyt;F5M;RgGWfW^NHe+F7c&dK zF)Tp9ZK$dPo_puLyzi1oe__dY_IYpAa;hH9Ejnz8MJJg{T`+eFTC3okwEcx_cs3^Z z;c8fHEUCvqB<)2AZ>@qB%Co6_Y-W|ij7f)wp_HX$@Fnp&wHEYEMLdBB+K8@CTn%ky z_xKYj@ed>#_)Y3c9-k!L{)p@F$&I?(kd+J{u#v9IwV(QY6NNUyKc)|Og8ZrZfAnBh zk&5NuwqOwDMK*5HN39vJ3yJC ziWzAO;c^bt#6loF#Q@b~&Z)h3@{jQsqOZ>9Y+t!D*D#Cn;C^ylH~7U9mz1nh1jD{&p0Gn1jOS^%18(W~BnT<3Lb^L)Ec_xQ6;1R~gJM}3kO4DT@jLk;GC)<+*`ad=eS?sG$JxS2bn>2 z%0osk?~52360)0CyjZXKA+Ejl&DXv@`B+I~hw zXd?K&#yxuv=aY20ovvU64RTW;IvpF;wLv9mgY&AYAlb80ErlJv zCLw9lS<}&}4=`S49}Gk3RLL4rJf2t6;F`tFB}E5!=^Luz!!iGqGm}`UK>X(v&&cDA zjU_CNgP4UdLI`Drr5zn@b#7@%uG9ljVT_0K7e;jtZS4KUR`rk|(QInO3;H;|i{C|+ z71i-8=43FFXWMP*@_2W~(QT;^><7ZVIEIU#orh}=#%-voHV=rjpWBQ$!x&329>4@S zIQfBw+Q0{fE19S#1rV|QBSCutbI@DSQQMwf{t01JD>W-zLUaTr<$JJBd3n=<*(1kW(yo47VPDk`$cqp;XAFXok447nH<5Y$gF~JPh|no_T6H6se<9Q z^__DTTe@aM0g&;u**xYDXv$@kXYP?$#07f zy(5)~UX)=CyP+wJs36TBabux2j`PQ=Hi|j8VknZU&WR(iH{iEcMZ>No4BDLs0&6b} zy$Dr|RaJbzMTfPok~9&DKLw`xK!z28CkZMxOCJdbNZm3$g|)ws>h7tIF@Y<$)J_b- zam~Q1MZLcCx0&Di0$OAXC>3+9JkW+V96Gi8dZm*o#j9?G8hzl$sPe7bYJ6f#6MvVx zeT@UfjO!C7vO=tdolPH9(_o+!JDiumnrV^C7 z@lG#02Q+hW=kt_U&gL4GvO5xu30>?P%K@kSGXb}u+g8p%KL|X2o!<47RomS-GPhY;Os(%q7_;Rs@UeKP5FC$f;@)5m+$eUuHVFy(-<=$NbO5wu!AL;R(v6mvQ_M zvZM(+gnaP*Q+7XpEf%nI0`)?M>uyEdWz_TcW}rJd;oFtj2WLk0dBy}Ste^G($;9JwVVbLG3D_5@N_AY5p`PZG26Z^S5f%tzpSLY9H`>|HiCc;J|Ei;3yd z7)+&M>E1^s3aj6${6I#)W8vC8%So7H(1yqwu_ZT|$%<&E8e3itK)aKO~ZgMb*x`oKC8sB*|3Psnr 
z#+*N^QG5MFN!2Mx-8_{UR6eLGC~0cg7ui_HF%&upm&y`GbdO;o79wh8+RF5v-pEk#Ac9+% zPp^`|E3XYv2pW)gd<QTdKA3Wo;CZ=) z3&r5+0Ip2C0-P6-be=_Cf7g`2tn_1`w#`ZFR<-HjJ+m!ByBzkwp*RE`MUR?S>N1<` zzO0@5QKssxcO?G(q;N@1V&?!=G*Bh5gH|B;8MjbjPI>-BfDPrB-Dv{U>ok#9{-5W(!!<|AH1vbV9a^5GRl{boDaqTxZ4VW2`^|9zv2|Q!3 zF{9Qg`7CFQGTrg?bSuJAy*z%H0IPzQwZ#c(6v92U8RV3%1B7N8!cP4TsQg)PIJ@-u zv6*gOt2MtK>AcqbbN9m3r8(N6?uI%&zsTEtyOfUcdy9R6k)d}b^F{g^Gb2rGHN2+H z=5NI@54{yn>+PpAgC4ZrDdx~MaDL5w-f$zQdqN?B>=fbN+KqPD4%;)`tiyFNKC;i~ zGEm#z%i5}W?Vi|33wL}W-3-#}Az3RMWbCUa9M=+7f+tJ0x*|u5!h3KbuDprvvIHW|q~**pb*dMIf07M19`2cM>T|rX4?nxR$ZDMnORnM9kk(upa)Q&`A0^Hv?C3}C zB(4o2xoivJM8!=v>+WGE0(J-CJ`B0mm(s}U2eFo{1xKExhGl`Nt8~(yNiP)XHWn4J z9*tgfF~vL~cUJZe-C`HK-7|W4TyDz3Drzpd@^CoJhCV5ljE*1j5i)g}cyT%oRwN7J z(~PEUsTgiUOX7B_s_B|W)^u>pb7Wr}{Pl~^@15JVFXPabZMd!FR+Tf5nEay0RyKX( zeh)<+vAHPE5A;2w{L^x!MlHAI;Zv?6Y3tXFy}^HfaLcH^3=?ibb`Pt)VpEA|Q_lPw z!uP8CXP!0miPpDT3Sv#vEP51c_@GOhrl_>_NNZFxbtzFgX3T#Ii}rAQP~>ROGh9nP zN|`d#bFXI2ir_x;)XpXzgG}p`YQn8nomzrQ%1IaN?7Q16Q)g_6EbBT(&mJ|c&-fyw z@=A5@D#Dzv1)=*AN9TNTG*Knhw@5jdypXQ42I=VvBjds?mlQpQO#6dX6=@qJX=Yit zU``G1^Paeb`M`Jx`b`52k(Zg3Tb6sAK`xwHE`y&_1U5AddE+DBc%JtNF#N{ZCUr4W z?ow&HRoVh&pn<$q&MiIi5@sk5e9NFLf!wrHcCnM$6H2Va_D^E7KsUYM;`Dn;uV!+B4=C6uQH`lkzZX z0A^%EN&LaER()@6nuc#Jx4|al;!1|oB3_m#vpKt{BBduUuY>}fMC>F|{mF{H zLF3Lxv(kwdIY+D}bADjaqwA8>*hKKGh^Yj6Co}7V=N1@jhS6Z17q4R+9F(Q2@9flBiklnRq=9_nYZet zvjzKYVce@#r?w>wTw04;2#FDwAH>#qWfpkI%9Q(N0N8%K6y7eCG>$l1923@H9!BF? zr;xQS%GYMaL!cTZOies5&X|?!L}e9LBo}~k?%d!9lE*RHi#qK@G?Eb zY)5l8F*%HHde|`0v#=X_(D#>2g$hm?L;y(o9&W=5;Rz)P%vmp>fmA_FheT@(;1n$u zCXAmJw>#4#@4cxc#_pwJOlw_opC!dP5enc}sTagEb24G!Mi95}9gcn(UK0K42bUujPKc~-)o_tSIaJQDE5c~+Te&OQKC~&Fl7xhQ=n@RZwCKpo_ycYlBM;8LumDL_`7s6o zI3^`D$kFdiG||pVO+8YrzN#IfYVIJIKhnHAx6>6j%#?7BAw9evoQ$+OhO;9`IM)$V z?yijJ(pS01;+s+HR>R<{M=gzA=dm`$@9ise7?F;(mrUvTwOP%mSjP2|hLvZEMw*~! 
ze9p(6VqV zom4U1jITlwoW2R{+S}Q?p76ruQie<)G`}=Ld4-h<8ZUI|r&2=IQk+OW9}*TCO5D0`n~l9OU7=NdW3kgP znHAL#lxm8dZH(Z3Ns6w$JT@~=xaP}MbAscPM%BoylX94VARl`pdC%x0meTZuu{SPx zK6$)pa;TddP0q?0klm1TFUEu0gyRnsUWV-!Nj9 zG13XBrQqdmn&ScbhkZh*L0!6Hj-{!N*a?oK?TCbkg_in`-_FXO-g(?nO6tW`7M;1%l7YR62k#@uCTHyBlB zT^XfTr4g1^2eZoy7E!^kO(YrFYnBV05o0VW0fbbS&jq5lxDhrvQNR6MC&~gjpuCC| z**LKvqW|EV#Pglfbc@BzAYqkA~zO4&%Hwk?ujt0uPBDrR1yb0*PTM-ZI7-m9A?PQoevo8KH~5sNeLI*G<+vm(TxDF2P&udmqQSK`0^>#&Ph3b?LD#wgkLqVz>C=J zAE#AA>Mz)t_HZ+hOhs9U>vc6DcRI8+h}5XPNwik_*eI%VVD~Pz-KYzd{87Iy?cs%9 zWHrp_$n?4g;^ErY88p9#(XL`xcF(vV!MtPj4%Ia+(O|QPnAFXBXA+01*?0%HOlV8Y zszmZaHFR&&feRsDagy;W=+JoNV2zDXfX8fIorCd|+Dc)oyv$hUa-R~bIJ<3cU*|r zCe6aOQIXtB8X@iAA-)LGRiIt+RKhoUo}$S+BAhpVfwRjoF^6(hw30hCC6O+Le3@er4Cz$r#oWl*sat=nk4Y{c`x) zaEe>h)UAeK@>2Kfd1O5#c%5!&(mf4A$OGmyXy;gOG?b;DTg1-eg37=7IE}QM(gnRhuB*B zls)Pb6iO+@2Vm<1WH1N(`mk=W^)Zq2;VxiwK0N-sXpFjgHgod(WAfd*w&xq3&;Pyo zh~}i0)RvN#Ihm5gtWX1y19u$@ZKZ#BG$w`MHR`Z0r!=%s6dx7V7RnXa%i!{?SI{*| zT7nh|l2|CZ9VI3k_lazv$*28P$v)SweL=K3%9i5G3%3u>tjPkq*v%%^0(DjN2G{E% z{ONs1U>c0WFn*BSHTepCtTu&pF(>PDuy_KcWhY_u?lieH=4ARIq;dM7%M!Dquu0Mo zxZv#nML2K4XnK0O@vD^aw5SF_vq?3AHCy&i+%AaGjf-UZ&uACMEHx3qh5)ysf{#NR z?XnqKILtC-8LUQ|Li=C}xpYVZrESqCa#O3O6=dXNJ{-I^I}rnL^8mJur`GA)lTHTx=9XkbCMeQ#Y- zVz(l~`4r4q6T~4cUgd=@KtOwY6>8N=!B2?>VfZE~JQjusLK=QK;}kYR`+V_hZGmPh zq9S^$U|T$vBiwlDSW~HH2@&3gDXJfCY~ivF#6z{}8)Yd-6&ISAa79epEJ3M#Gc2>NTDfs1te86P;i#<~IejtLpvc13i z2ZdY^ulDo`?{R47C8i=*1(3>Uv*;zN8`XLArcEnL-Ap0!yEu9w7keo77w9yJ7i8UY z6a3}$Q(WT5Y1zFH(7)C%vX$DJX{_QHPdip4Yrb7J4GJ~iczJxdc<>N-Pm;TNi6*jl z?#!VJ?o%eCD2~WKD9Ha|op0G_GqnWjbb&Nl1ac}jS@5}NNlDjB5p3DwR#7qQC8V1* zp)Wr+7F#9kr(IWt$OzChym>1;BYBr=q!fcUZXUhc=F?FJU--Kp^yNQ{6*SE3+((uo zAF{DnXgVT7q%4z#qp4Ww#YX>3$Y+VL<1n&W&~~8!Z8OM^*i_+?Wwk|~-^BCoEZCPF zihpg)SfVx&R&e0P=l@Kslr7*G2boFDdC;hbxa7Y?%NZ_PMPT|C4S6t`yM3iargq z{JXu2Oh~qaSK}s}$RVm_BD*)KS}mji`pf&r?4C`s4g8s+h?4YZVt&#Oc?k9T$EyKR zW_Of;kU!2_k#IET*;`<-KV%WD)a(Ogi!;?cSSibE{r4@`YtDBAiAUT0y!y9_wMX-2 
ziCw4YD`)z5nj8u7J&7r#-g`+)C=RxNVA60H3K%O5X(#QkMhjF~aI>VYozhs)=VnScRks^ZDCU0TFSi&akcw=b9mZa{_su5SJsk#j)Aau z1w_Ap@EPw!ez^zv-B^D_5asjj5cu?}v$u>e@a$cK6yh+Pv2(WL6_hSLdT+86$p;xf zB;1fjIk*j;5xt-GTh@)xJgq z8y;q_v@%|l_)IGm~Fb}epw`l&N(JI!mv&kSS26jb6J!p|PU(t&~@M>3rU7`9(S4ERoc(44d zCCgbUn(YGm9FafqjMzS4V!4&&6!zL@So`wh^0{mWI?=ZsUn%n?J(^l zT3aG$Fw_*NsNCvAn;MWPHI$t=5eqKe!!^rC19d(^BkW21u=?FKWwqNb2t3F-iL!~v zNX8zBNt$!5Lnwk5&$hbBSX1%!|K6g=_3Ov+{S>&7d&-A<(&`i7YsuE2ckz>zeo-h$c5Gb z^2+Eg*lZ(g;%awAwp*&9KVSEj2Z`^RQaPncFA;S0ZD9@-{3=+t%WAXsN^-fso=}KZ;=GR4yyBuLzIXqG7)cU#3p6s|>nsVM) zjQg-k*CuE?v0y0g5PMpC9p&Mhz^bF(DeQv(H=!Uu=c+Lf`6 z+`dNrdTy`OdZlr5{%w>1SB|J4&{v5s+~$m?xyhLWb`vhE$|$&MHMT~e zB(n!w^~BcT2=?R8nUUbYf_tva@N`!Ij!yzb#<7v|I%FW)zJN{08yPI_L$p%p(Rn(p zq2Z#~^a|sC-nSD%v}LgA9}A=y>poQ2tYPHnM!eEMo>}xr&g#urf~TJte`GIT@Wpxp z|KZ#ZY1;evD)-E0B+%FC0Hf7aYM++-UuWp4hH*lq4q1k3Jq6&YMwqd!J7$o@e4t>Ag@&umjY`D<^weSMDc=aGln?ZH!OmRWzv!+Nf)Q3Hux&n^m3;ToY* zrKYu=KVuzD!{)bMPsxFY9te-xbr||`tu_2&Iv5z1OetXA8Rc*6OZ^fK^^fh0mcmqs zezcRE!#Eq8SBqE1jsAc2iYxP6~d(^{5$Stw|*+na_ z{nJDI0`|jleH6oF4T}J8Yzp3DmEDUwY`@vj^c1KFFPUEN6?0wul#ZQNErQ$ASoq0b z_~vV5BgM1FFb|^pE2uHC9ZBh7NdJ56M_@#CDq`Z^(E^U(jtpm&N37YPzi~#ijNtE8 zwYv45dIBwg4r~fLNuu79v1mWT(NP0ei0dMhPC>&m=as=q)%%aC6<>=n`j55Q(+vcZ+228rFkF%FL_vZ6zOp{* z6#LQi5?Z&%w;VFx?<2{l!7ZCwb$PqsEo5)EPmw6i6lA3h%LxaMctfN9IpejY1%9(^ z>0?PnIYQ^Yj|P`0(H(Q2nwt>sAC! z+1AIFOE<^;h%CgsiphynnITK(YxWj2f*7BHgythqNY;n66>N#uQ2^J&nWucW1Sy? 
z3QrIa{!U-!vsd~v~rjFGO;jS(~2l~-%^rh1PK0uJcAoZO7u z=Cte_WTW@iCEybW_4Zlo%`>VCphKZtY=VpExTFgR$q8!u#BtOoLi{ObLE z*?%1Ly3u!HQ?wMVJHDT});r_=%QJ@k6@7Wm9(j&Dpm>aYwa4xd90Zw6@kzM&D^7AF zm;ER$26_Hnc@c8=q`$z!T&_KO-b*pcdf%9u(LRKLXWc3IPQgRNNp z2;QjM)HpCyHTMJ!r1t4{1~JGTNz_XddD19-_d2fgC{xyY^6^^5RRLhaEtgdAPP0Yx z{2QF)o}sb$6~VfBV8Bip{AF;N1QyLyN@5A9gSJzIk=Av2ZX%7z)(L#hqW1|XvN1SN=*ryka8O6I?CB;{qCm7Of1kh~5}s z8dl#Q2lp*Y$eBg4JanP|(g-1fM;(s-^oA^;UmvE-e4X`1s226AZr5#2>fIIl>NCm* zH!en7$GT*!WZxRx-x_qU2Zx$OS_pDxT4OGYJSwK&WiMT zWX;7NPKRR@A%SCbrv@9DlQU`t@HfN)56<7@A3q zLB1IjdS>=@lb4vQ=W<9UvoO%xUmW~#|G#xVifqbjH&Wz6=*SG@{3wT!6^l{cC~VPN zW+>}T<4;{XGtWY@$P@2F=EkbHQ3z(Q8u7ELd|XvjsTtfDW27vn?L^7no7nsV?|nT? zz@o1T!~&HjZqb2QDYm=iJV^^flK4e~KJdb_$0^T4Z*Ho@J;`(Mi(V(~yAM(z3=U7@ z-igG?O@>-hUilk-Po*~&AVYQ(xl+5_pm&qIGF>c z3soPST6p%K*!p%JOvjh^3azt89b}emLZtQW7HjNSn;?A0X#LTIMd&l}L-DYSsCHCY z4SG?)`9XBRsB)Q!CyT4F6G+M1;IfcW53i#)mqBT86^no5_NBapG?ybZrJ59qs<2Hd z%zkAQdOWj8MHuItXNL*83P%dNQYA>mY%?t=N?aaj80CY!Urs9--|D$ z70MJ-9iFq6#5L7M-OVndM+QsBgKhgFnL*5H`pP8rC6b{wT{SA|Y4ps+GcQ);3A>SJ z@Io_o6Y}BZn0l&7Lzp$}1KrOqO#57Dmj%0fx4@@0}JC>5szEK<$n%&22Rltxl;gaKztzOp~C zP%$B!@g94m8T3L-PF>XWa?uga-?hIiVyhT~QtGixrI;gAJuvC$g48xlVp7O;)L);K zt#v!`1BGQ{rVnqep`_d-^0Km&r&7SCh(HFd8s)N__Q}m1U7&IDK(#cM5jq`m0Wd;` z(sJxamhe>h9AbeV@Kd(z3MEdS{PuAE?yeVS#av055xw?^(Xo?S6!T49t$IadkhIsb z5(Zw0$pU(`E?5^FNNp{hEPkP0IC$;hI|eH3Us^(MR%P(cV(T%Mxy?kimrWerW9E@d zU;e$)i&dhz$8%>D9RXQUB?YE5o~Tf?y`vOSA0ro?)R}z_luyV*Q{Jw`#ZY5R8IBgc zL0d^kpdU!3R>LylbbNb;)B*aC@nE)~(Ufhp>phDs-zVu7l2}9Ng^IopHK>|n`cG}YC$L#|s;0B|Ad#0$ha<=P++@<$=#!r#|6mSKD>Ye}V!`0|u1 z(P9u4c6csYb3YrSSAQ`%h|$@q|Bg0#c<@Uy@G&Jz^oZT4@?DGB6-ckI_Utb=Va*vf zel;G`EOzl-Lvy=Xes_QzJRyE{THb%0H0lB3tas6x5o97Um06ptV?UW>d&rS*?i#8R z958y50}*Fb6FUkfzwm8@7^Ozwt@kc#AQInMvzSZS^nh#CFrnL#cL#CTU$cqKOmk4M zP+4tUH_WjdQn?o?ZXg|L2mhVofdv2d`TT(DV3w=~U7M2&h&m4Mhp}{VxOdp7of6CV zU|rN6qZS<--)jR5tlqvh-w9Xf5(z}7nwPV&4tc6l40K+yZrN{^odUS~d%d+-e+XtN&UzB6_At}ICLm7)0`L~-0W 
z)ku}R7};BvUpVr2U9A}nrv&2fU{k<+zl|Ke@TF-G(<8oZ6$z9{PlN&v@i9y6#xlpm|qlD>dk^mcO7ZRO$aa-9BsdUU+iDm3s;s z?BPwNx|5!ziP+#*tc#DWAddomtJNM8_}tOLS0+}_|0}rl{V}?+StP1-g<|jVETPI~ z&(iSPG^DipmtQkylHFoL$PML0qsq$Q{&Z2aaGS(Q^Zv-u9bqzj zdLt$a?irk_P-e4d{el;;<-YW&FvRTl7egC|I}ZwZPuQ5S{60P$KcVft}B*S$T$M7>TwTWo|}+sD_b#UTZ_ zN0zHXK>B<|KDRK<-RkiK!Y4Bn)rS`nP~Em>S?f*ik3gi~vo%(7ul(LeZF^}u)Y?uF z=J@>uKk>lIw_y)qkrAA$>)=9aE&*z9WOd~>QBOh&CWl;Q-&wx1U4Ngt9jZ`Fhn ze3UMa@W}PSfJ|{e+06Ntdf)j4M2k`XsNXiHIhSbnWRVYKV&Gm3n^oOsN)`Vi0h-*a z!2xol%`$a}GyEl1qjbp2)g|5|#mn~<+Q9CSggMxp0JVxnmPs^)6SC3zoZF)AkPXtC zh`GY?YUgkbZ9(yCSQ&J9zrNLo9{>K0T1d|JM(y)SCQ4#zY(4v1?L3u#=)mC9iKbto zFO2O6B2=r;H}vnuj!q@VKoYqx!M#Y-FY;=<;drr)zT=uY678~@SO}HE_KU~yRgK-* zF2Pv$rwy4kSq2^LB0-?0zuq@#e@W?A{(j%yezl2JiW?7#B&feM$ng{W=~Zxl7{imo zTEH?_aE=IT1c|!Q{YrQAcCex%f^x`Hwq?tF?rNut_eIDho^M>GS&%&(t zo$0o**sAr?7yfE`ftb&!3q;5b3#3_v*F;bR%rTJrM`bzm`{0$^=h>w>z`k2~&ZQRM z0c|OcTkVOPY^S-BU3*FUoMTUW4rD96^M@;QRVc{?+OM`+v@&mP$2Kc6q!bY22tNpe zM-YqW)cue)%$6hZH1p={N16v~c%rJ#=Q@KCWZdL9g(K_>!hC`4_A5w|S2RDDv7~8} zss6FM(@@y(+byELx(J$MJta?Gz9Qw-MDpo2W_-|D%NM$0j`hBUH;aD^MJ^hy^r$1E zwVnTWx5c|!O89$1&hdN+r&52{aj4z}Po9iKrNa@x;o<+YqDC`58--TA1St=uJ*dWO{XF}cPmlLq=skq@tjeEez89bG#^m#S5A~S`X#Uru6JSgb|SWD%3&1L z$I87Ey${+QQ&p-$-SeKlupTJ{Or&>2j@k*sT*;@6TXl{hiL|wkVQBoMit2^pITXjIxDtc4kx-dX<4f4WmQ8CzSy^o9&*uV8l@ow*vf+EEJgoW{b0Qrlxwb*~qe1`wz0!O}Ml9 zMc9o>2d2 ziTTrIGoxSBIhps2?JfOoLyKyJ>Mssc%?xU*-{bqomzf&mye}~6gMES7p2>EKAR>m; z@0}bP2h4~Gn$)3fkF?r1G@bPFJ#f5e+=?WZUzgO#sviRC6>862d7v$M^0Gca(=_9t zl(-H%2&W?Ex#2Q`MjKAiQRFMl93#`>ed>QL+#)-ghA)cY9g&<@U$eb=ju2|aBn|4U zp7`=bPEiJlraM{g1!*it8{QF1p13QepQ-3sOS;=hgl=hB;aQP0+DnU(aJnfModu)+ zyDy)-B$_j+*=2N1ws|v8b-NVHARP5gs46I`8f71}_c?|b)YI=})E2lAArwQlk-sDQ zMUQ$+57f~VA(dB9+butfM5-1={bXXCwe@p~l)LiTa*qOw-0X8|dLsK9jVwWUFNZ>z z3&ludu)6LM>Bq_1kNnMWJC?4+V!q@*THaT{hW0z7v9^bV`G+bM`@;MAp-a)dF4Mp= z3cXoxR}E6z5an*V(qr!M8J+J7Xr`E^qPq7~#jvxXKO**;E_Ob*Dsl!!5jGFi1G_>< zRQ(VZhB*d@IEpCwz1lDzG1(-;dm=%oJM6mc*Q2~OkEfp|>t>v(G(FxJ+!@ui(NAxW 
ziXPp+p0Rg4y7%-*(~;<44<43`)6J8`MymYBwMSFti&FIak(hhDv_M{tGt2cgbq369 zusRQy#M+oI`j(pQVU!WNSl~wj%!d7tkH7i52B^O-B&gRDNOZczyGS26qBqK2{l%Sp z+5s;YmWUO*7j=aS$km#ShH3?TNrKOq+4^;UNJ@QhnY{(v>q;b(Ps)PbX1eCSNaCq^f#J2#=5)&M%T!#2ixKrIg_wUjt19DHzYBE4D zkYbqvUb0^!XQ)z%LE3L*|9#`#eVK1*^J9b4BeMN>GEL;NeBQ0h%6WI{6nXl=7Id@* zxrhU+Wh%w0$}6lk;flZ~b!v!t=vitmE3)0*9j?REggVQFV8XNc4OK0!2*t|U<&Oj$v+=> z&y-SMsN|8f@ZMPR2y-CpnD#?Za_992&^^xNKK`g?SxjAmNaXR17i-NWFD+}DlM&A7 zOG!^VCf0*AOo_-bHhCs_9KW_{K-w+!$<*f_evk=6knJM#UKo3vKIGNaJp-GvgkqT@ zGq?uqRc3gw18b$bK`katvoaoG?+|%?HQzzE??CMx*UJ32s?({MzNtAg=F8(Mr{TUHb9l!Z- z3O(CvNX0m>dkL+jQuL3gpv)y2G#LB+5^`uAX&ipKIoru>b?BA(G8(%X<*+fRre)I# zJiKVFemL7}<6qtFvhVeZ*Xz-DEb+)n%F4$$?;&(Uj$}-Y-6E{0;GuD0u?^#r>a|W3 z^Hmv7AeZmHlqJCGc9A_Z^Jf-b1BmJ=R$yRo=b>m9RV5@Td~fE@2g7p@sBg#*sXe`* z44>>e*>`&-tE&*u54on_PxZ{tFloeYlI9l|!5nV8D>)^qyJul#1U?6WxEPEK$Hdu(}RYw;+s&KVoO>N@xnH&o$kU<90a2DEvaKy-)cB9@03r;zz} zIA1(%a7rpkwZ7{K6Ytj>{z3d&sj68Dfl*RJtv-1!8_}O0FCB*qao;zLwB`BHeUEp6 z*5j4L7}l{PzQ1&1U*@3+tCvM8d-wG?#e(t?e31IVwrk4z3AA0;tC#fzGQMLz=&%C( zj47jFzxX*AQ_m^Y{W7!kvky|>{A^%55M$L$6fi2=-pi`0-A8@1c!Q@pE^zYSh}UB$ z({SR52rL3stH}I%2&oknAzy#_nsMd$X$1{WbTMHMofc!=OLI&Jdo!z2#rnt5qUN^t zueti5QF}r2?(x;aFH8dNQG3Q_} zbL9CB(*l>$F`O=M4`D@=1A)Pq+CxAjks)+FVQzye%_TNn{k%)+r+Rl+tr^uxn4yq3 zy*ogDSo@y!$#4u_Re*hPlUuBbpoyaMvx>@@{Cxw%-t|hz>3!d~4!O^z7qsea$)K%f zZxIw%&P1N^7vdGc^x_o%)8xKYoX0MY{j43hzM1B{A95S2*>0s?wyMte&Dm`I zY;6tkHExAto&gS#oP+8?01A$ zvffr7J^mftO2mtsDcLqVZT`QKb{((_4ZJ+56`#6C;>^5E7`xevJX}f$aI7t75)E0Z zSZP5b(gvjUqUBZyA79(5aEYiAZvogrw7GlHnq#yPIz^Wykd54(6zgKun)_^n80lQ8 zyb@sPC!Y)V#WJ|;pcTSPkr|opo+R4Uk!iZ6C#xmEnDJZSrMB{lX{@U>yVo;vHHAZu zd&D{z&uk5AU#CQ>HVRejXfLM0ChW54OkL*AR8&HbD&~8~q0%ju2T@U=HRGvSv(=7e zJ}njW9}i%m1Gf1i_DpSgO8gL{2H}o9`UVXHVMosM5w~l_x6g;TQjRgq zdh=Mx^4|T7LOccj=+2y5LO08H;w=fOV$2<2*Zc=7QdlT?)r=)nOHB1?u8c{q51iR-!WB5D~ zH~l!cxDtDX2fT2Th-6lnY;wcLCJ$ACZrlFMKSt7pM!6}8HlHvImOgGB^*DJI(CbL8 zo>=L0&3UH%L2;V6rb-1l12CBi{eFf^vk>Pb6h9T+zu}!hYr>|pPajUJZ4lmH7~aSP 
zMlYZJ5L6exM%9qU*MRoH7zG@p8;{`Tgg0z#)FO(5id*5vG@g58+-IF|D}aAqd!) zx@%s_a?aa%iopYkG#2$#_^##-XDLCQ6!W=TVo3qP8MHFhhcOYq&NZ^GT(g`T1W{Lv7Bl8-&w+8X>&QB1^+mmTO@jA@NqX*y3oI*v1)m@h~z@USC|tEfEd6D>v$=KWopezV>% zUiW{QJCh|64c7Udf;>YtdWH$=O77;IppmE;|HKh-u=X+)-s{^L^ z7Jg zDZC=8XII*n=@+LQnR=z^=V=^bc!hMEyLXG!9w&s<+>Cyy_z-Y7+^2dF z2m@?nUNbfO-~A6ShRDb}`2Wik;Qtj|%lW@r*8h*50Ac@6%zL_F`al1e_tfHkm|cyD zKd8S+vj+cVPy57Pl(;9nH~H%bi#oJ;mrA5Rt>xF=+7x}rvH8=NJZO;OE<7q7$u^&W z;)}1+-VHGCnls3XKLX=HKdzx7dW~Op-%!6x9J0w$9gaSthpi$mY7`rpSt4Zo_@w;O`%`L67TJQ0GtQg$2o2CRr{@2vL+5JUxBJbX>c$s>76Q+~6}-r7MZ^Ozc}_ zXM~?O(l^y;){+mCM_KQ^^pa#*t{vdLdkJ7--+wx$>WT}|-}Gjq&pyL@m!>3^-|-$! zf{@ku^s#Cnba*MCYJkx7l$jOmynQ{M(xa z^1Ae{B|r*54yET(Rdy!;>MgYfHL}bN$CUBw*8Fjq0gd)YAm3uMWTLN0I^GHc-HEg* zol@LwiUwJRs!&2bpvy8=i8^%W*nXt((lC~7CA))z zG^7d9-LUQuf_Vk?5t~XQGbaPoSfx&jsCu=f%<_8sQlaMn6RzrGijY&P2kI^@uWFmM zf?JBCCR8E-(3+yH1))S{K?2mLR+CvC;f-DvlhIgHlpH|qt#n@izw5;zSAV$)i5U5{ z-tVemt+(e5x~sPQD}bvOu87@ecrAKNU-J`#u3=n$-^M9L^S)h~(=O*w7j$?gbf37dU{Hb?&Tw44Rr zPrV-c73rVaE1PH8c|&1Fexnlo{<=r0asi)kRiBeBY0oi)x@HmVkvKhuRI_kAZ3B7# z<DOhlQR0 z7P>a@=&-lc$38Zq6GK%vu75QEIWz6tn8eRJfQ->25It3HBHfu;1eHznY{!Nm^wC*E%tq0dzG{z#~Shx8lOZm z5^`%bWSe=xSf6+mF1>qIQG1gFU1(%tnfkSo6lZ|mx(m-~h5;kduS#n&lPM4(1TxD^ zMmjF_l}8@gD#!VW%;PQXo1Yk!lg&4%r9XW(8AapS4ePhILVia_((Yz_j5&Mt z7Uv&8T$Fs%`tEuCvq>&h_hgiGD9KEzSsqltFX+Rvk1NF zDw(l?osbP>3fFX19YyB{=C{hOB)Qrp)M2f>4zMDGN*8m3l^o;0?av_}$`BWopjY8E zM91vuu1gB7&ajjueR8@I>bb|-UIU9T*~K92`nk5Mq@2y?J&D7?(vS%iow6JI)K~2Y zy(z>xJ@2}#N$abGy3oh+xFlN1>Wc=d&V6q_RATWVz`1ntz;a|^G%wXff0ZMZsnb4%SX2HuyV&NaDR z9prz;Hp8px7feeTo2+R-2N$)%tfij>X$U}11Uue-%?<-s1NCbAL|63bxm(I#QCB8; z)EvcAphL2wK=F!gMinFN7T4fqv^(1KBiUC~$@`D`%AdUaF5rzdYtDa@`Mnk~$} z-K|;$SM;7G_v@$cMWs9$7JERFmX7#G!e;OjEGT71xt+e4j1J^ni4ETWE`7>Dlm3}= z^|Zg`ZaVjIA;GCuq7I!|!55%k3&SxUDYb!u#PL<(*L~a2JI+ve)h`pW z$vE~sn%SakCmyyrwlcabO4CmqvMY+~rhr;Ja%ilJpQ zGghAW7aZm)Th#>8>d>p&ihKP@K+rGn-zGuc^@^I{N4q*83?cpguqmLi4PCdHwbe$G z^9AHod~63xMc;#9f%~=b$?{i=^=Ie9I@sR4c&}nC{$f?Mdn4$%az_EsL}21_R>QQ| 
z8ku=*{0PJ)X9-Wb!GYF>Jp9B+*?*(sDHb>AEiFl?C5pDb+7CgzKajx(aZ-T-C z~MBUJm=0P+2k^bz9K>qP9a9L(~*QLV&0deegvjlB9&zf&kgf?(~h;x1*WIWti z3k?PVY*%v$?-FB>qOD%Q1#tib32&e%dWTzW=sRprfK*wigWqLyAzpv_S&f@@u@yAaX`_i1z6aZJ$RL3rR@sm;t8CZQ z#45dxeQ&hf{c7TfF1+X+Wri2+w(tp^LHGm^*3^uFD4k=tOC>&26(4IREY2>~g%y~* zrPq_AO?#cx$nH{k%tV(ei>qwhVKx;#mcS^wQDf(~K~y+sC$De3q-j^A3=R$nJJo~bSF35QcUt^1U7v9q~l2QAy%`emaa50{8d&P#KpI_9; z0T)0n5?w6cPLcG$PC88GCph(Nif#ej(BefWIO20lV&^iN0}3GQ z0Y%&baJ$d3!BYuj+*jR-EyP9$blQGkD{*j55?^y60{0%pKezniRk>a)k?aYw)6wDO zO(THZmF#|nq2*dvJm^S#iNuaIAu^l=%mp=F+$piwb@E1pm+1Tf$BzaLwoJ2Oc7@vu zJ~BX`4b1)siq0?FY8l-XdP+ zlDGv!?C&+4A^NGsm$nMr66+JtlReRm8h@7YZqq*7fR=P_gmRgs@&P5t2L#@Da{x2$ z67ClVT0(NIM)L(mnq2&Ov@F)c9;fZ_9bJ6elQ10YGnLv4%(XS~v%18dXtytwd%~3l z3C7pA25WEc&|?oOd>udy1bjiDO+R$&D2(SQBiJ-N|D|VpR}d3z?w!6Qt2|K4hR=9! z^Y-ASlR!H7ZT9=^vxt!FO*j1mf0hso&3uZmsT~kCJpcXno2G36tYsB1?O9>dO)IVg zk!@PDV#$QrUv%_N+x#!s6ovoh$IAW*o4({_^FMJ@6hdA1ple#P9VHCxp*)-}VOXFM zKMdy3Btg7XGIx(sd3e3##nb7%b|u$JDe&qeY4<3Tr|c=YUz5hg(KS#ac!k}Vw;q7P z&n}dEGmsd@;qFl(PrX)x$t*$GT_gvvI;`$;8Q>}h2c_$!p#)pT?r~GzWf?eSK=H## za&H3AvM!N9OP9z4VGMmnNPl4t;~fI(hD z)U~5*cBjNU7@H8Mz#{u}r9Z|M@TPs}!yr11RD1(i)Tj9A<4SJ~rXaK^CgL1RrNJzo zD2sIx?i_j(lVGt_$)rZosRF>D1QS!UuhVSU(RrWZg&%-|4p-SK-2xV%B}lK8+5~Q- zFNYEwtYquXavipfxkT!ZArA*~pc&a&LA;A@py@FvlXa5#fg)qzqbmlgrsns6GL{Bu zN7AS`QuXq>D&E%RdbvTeC&}Fe=NKT7z9=HNBmlio4zfQfxPJ7O?&WhJs;|)54I1V_1)#-6vZzC%tk!j$IYmnj zwkm#a2E|UY>D;LT&d`uf*6Zn`3SLAvUJ^f-@uSUpbb$;3>5PI3z>BjXx(=}H!_`KF zmYDGUCE4DQ5Vk*V0%9Lg_^C?01Z_g~j0X2!uX)8223unj;l*ePuhaZut|1;(%-R>0 zmqG02bB&;QO*ZIvFTMkJ@=eP=s0QKG4I|G5Sf$rU_EL>qAUiEJ_hMaqVu9#(P+*uk z1BL*H@mp1Pw+VgLT|gtj$}T{&B_6I;<0mQP=Ba};ti{lf#bK(uqf7>#$T{@ut~UQ` zjgtv}rwL&M+3UsYG;x}N5w^1oNnU=d#vg>rRX{3aFT^wOOc_e;uOTanJ@QMp%F%4tziyAcl;VM5#2t})Wnf9Yfqx?pVUqnI| zEilOeKr>Y@Tc(LW0bkRdb-c|+4vQwd%oeXewi+7s@%K#+28y4ZVFb}IhG3MN8vk3Z zLc(k!`n13vQN#2jP~n6&5QwEg-TK9E)q3vVY*fvazGhFr0 zO6B})_kf#zc9qS(r`iKbqnO9MwmKxNu{fg}Ln7qH; zk6w}4^<<0m$H^p}f5qmrRQolZ?IqPx=03|HZzTHw3lqF2nXHp{CE4E!a-HNBwqY2T 
z<7M$3DB2j?UOrZccWj9rY2p^0gz=l|elgBqFgw5#=?)72nkzoWAdFv;fo>S=FLH5e zyKEa8J$3r)7V(X`#Mj{|JrnTc?5pv&ZE;%%i8agF)1`sH?@;UMVi)h#S=Gsb*o#{w zc2JG*vs>jYo}W*U1CS*wL%gqVT7I+^;50$DoP%n9hyl4;XL*Cp{%wp1%p6GX@(gku ze%lgr4GF`K{n4~ugL&>+{KyWml1V#+&1V^qwozQ&?Y?VcYCx=TYfq{6^;Yo(ZuxRu zz)HdvKfFa~FaoXyo0f*gKi0^1z>a9KVQiP5Tf!Ix>jooyzfopwpH@GEtva6DGgV^S zz;s({IFTeispD{6<0TiTZH4>0UNf+_m)-C9`QfTa8Dzy{TY1rJN4(Q5vzIv0mZ7;g zaW7E$VF4l7$afH4bT7!y(1H@cOAY)2UX6(dF=h_+_~ur>xReBe<^0X!&oi-4D>`d` zyx;t9mWa?B=wRsE+TSb}dNsXBX4^XCZYWmkUU-)(KD6;R*Gh&vxy@~;VvkNpm^lV! z`ZSj;*VzvHm1nKi8^srvc+YMV&oy8xjaGSU%`euv_2~6BcA;GR(sFm^;zfoD@Q_22 zWO5sI=~C#j*>j5AR~Ydxrv_L9t?%Q zdhr+0&ol}NKD+&jpODXK&5C8?PXZZDUosx>gunhTQ31ggPbCD~Ye_{-)0UB23S2gU zfDG(~#&@TKp?3LahAQ%ZWvC+j=UAY~{}V#RSt3uJCczMyCT$fqcd9TdOlLr@r$C0h zm?2RgbYX~+OA0hWL91{Y5%o}se;cqNcU>n9P>^jfE+)wJ2!m~++zl-Avn2o=3~n@= zeMu9$<*?;^{HcePmA^+4%2U=!E=3pdns=t%KsW;>WX!l6r?(+%cBWV#L>7$M!2A|@%>-gG{Xr}WT7?4E#pb|ts)j&*tQru595G70vj6m}vsXxJUdJ)u0 zx<6DWaASNDB^SwqW%!Ehq6;pRt;ZF#rKc$rJ@sLs)Qhzh@nky#8Xs4{qC+uYO~bi% z)dz~^M-mP1UV^AfbzdvFi(wm%hHCmqg2?PHiEwbP{Lu==%$|5Z4Z;Z4WKg~ZZI@u% z4_}q>C;{w6YPd^vFP8lrxYw^(jkp`EHP-Z#C3q=!skrAjc};f5nBo#b0*ELGoftwp z*{osS(M4cm4=Sz&-98At1)y~rDxU}A5EjWLvh5sXdGVkkzOUk)-mTi-R~xaX4u#Dj z*h!yNHX8uq4H!`iaZg99c){WBx*si(_|a+*Er7~Ok{;YuX86T4JIGEjspSOI%g<88 zeWt`FFzX2=NE)gu25<*ELX7^v@v`5lVn@u@q+cOtcWk(rpno*MQC44)L+o|d#0R$2 z6-5THsJqw>HWf5!c(TIYv&q=}CWMX5&tpM38)G7EejwF6V8LFH{D)Ais*%os7Cp-l z=R*Na(rsC5q%aLK=(rhp;p@r2I9>D7Uqi#4Wd!jPhPivzl!t#)b6=9hXO=9sSM)}< zr{?Y{iEb08{hJCjtc`KJ zW_xR5tkZ_yAXN9(ILvxgqXA_>C+2gQmv)#z^dXMERW*AsBKc_w-(-n`V+4h#8hnGs zXK3tOHH*|fw)+fm7(o1CsH|{%g->W3Owu)O5UZNm7nYA2yweoh2uVE0EvHuzhpDaT zMYAWGpu37ipvZ2nSr^ESXy-OQV}w=GF4JD7HJUfq_{*}m+mYDb70_L5d->xAU$3!G zYoLU_W~y86bd6uE2jQW3w&smBeuXP8#PAKLYm8w1RE6N&&s2^5$si1Xu>Ver7y!Y> zk2A9;U9P$J3xY-{I;)YMuJBnM;)a^U=5u?Z+>_pIy2HAkZPeJuHEW{WmnQSMmmG-~ zy`NSuZqP_kUZ#6A12z+|K4FkE@^_{?2&Q@h&+@TmLHUKNJNatY2Vse|tq@Ew>kEvh zD>@~1GdT|$VZ`~<95P7~S3y}fw-Wqr4AD<0axwv|+PqTtqR)X3U(|x~9TyGVRnVvM 
zctj)qjVpD&xI4&?6aSc-Ua<~y`+Jaed+3&PwGEa5V~CbI-es{B!`9-|B1~SsyztSy^wp+Zn}5~W3PZT?LmWk zd>2HwBr*d`u$K+#Ct56*7_t1V>OqG1Fvo-RA;)NxH)&qEwdv=tK#(Se)dwWG3j%^f z-<7)P7dXSkoIm9F;lUQe+(uV-shnJ7rpADV(`efZVHtEm;^fn+xqMj7-ANYzZOJxJ zaxY-Vjxs;2=0~SEVpunO@^G~4TH;s;lg0_ zsiDOZmeyXqmp+GoPZfK1V^Vk5tRA&dMuwMsqKUg|P6dYpKgH4$L?m4%2PG~X+~(M zD;$aV0h_YD07+n>!4iXQi$@AgA^yepgb51rxkf4qd6)O4!34kX@$yiaFUD@=`7-}N zYRL~**l9{{POhudl)eazok}7!$YDP8 zr)YK|c6(H4zTc|=v!812Yw}*2ka-@Te*<)Pun`)c*@QrX(9 z^(0@j-9IUVX&f5jZx$@T0foP|r7vB}=qvNn6)*cEEIQ2%qF2%3C7Jmrua~GX|%Mu@JJPG|`KjJqrw&HG37l*!5wU z)sx+A$`>P74mu>g0Ne=Ub9~lJV7@;H^K<%07!A^G7?+~4cfD3e50gY(1-%S3+L@&2 zMY5%7IQ3}gv{NCjC;?=&s*kjQ$Nl^W6)Hp`eZG);<#iflH7I?clJMb!4xhyRbfzk9 zshRy*2*C&8i_v87VOy`tePQ&D?ZuOIewM+mAt~-WVelU)eumdH9+Wy7YV=Ws@1U^% zsgd9470&N!@uTrFJHucv$<8=~V{Rts{P=T=o(7!qJClM3K}1H#_keNtIN|~>iyaix z88j$RS1i=UC{Rl@Xz)jbUqFv$2Uw8(eGv%J<+X|m)4M>?Sv1b@%gPH0m?BEo}f$mmLyf*U93e!33bH(=NgD~+gA<;zfrq$C+C~{xBrNt}Ih2$zd)}d?O(&C4(wg!}-IL0yh zvh>-}WGmNeMx%J7 zeK2gkCIfitnNoq~#!(zM0c)1o7G2~#&T~$lI`NA-2ySMF3nBHI9E59JJYjJlutXo_ z!vbPg+T@XH_uMlS@ka+jvkN>oV0aP6xjqV#;&k$o3>YqGsn4i~uj=fBimuAHT|d8& z!k0E>rOe}GT)fi_byUr77+T{D@vbY0$6!*H$2vp$-#7Vj4*Pox zAu33x#8j`s0$*|QT1pUIb2M4k5L@Ib%s|Xu__!hxhOxgy!q`m{CjPr7Ver+9EA&c= zKs5pLf&v2tgPEa+Uy=D0Hi%XxUw*X%|Ew-9?ZgB)k3*lhmWyZorX^Pn)+`O2ksOl0 z*6Jz5{)NzidylFK&%~5=?L8&OvG^G11*`Np{wW=8M8bLOuyK}WhXc`dHt`~_-!42_5x#f zcUA6y(-n%={nU~KMr}_vui<5{0P>e>_40O=Vcz~pA+KV2aE&DWblUbJ@E4dv65Jx5 z%97?BeZ&`4I*lE|2;9i z!l{h%#|L8Q%_`GqsA!UVDUx?3M++L0l9G)eucs1(Es?sRA_p>J4@mZ2asfd=6^T3r zniC4Cgocko0;qJc#6QA0K9qV(@D&+}xX3`>{7@PsK&w;S=R;|Hn#LajaA1powjjcF zv?MRdK&O`I5XCt~36hT_eIQMDl6%Nv$(CTWKBCZW_$Sx~$ds>za(YCuE|SrjkK|za zd7r{Yt5vWIEntknFkT8cgyCg&gNO#AWIaF-A~8KfF(BQVXD9`Nc4Rwcxoq3n@` z^3++X*TkOLB)QUnljOb(_~^y!!Hd)wovqkQC1-z)j5rL*dN$%h@Kl%}NmIg3Jfy)9>z8x*xv|tRk;ke5veZ;}k#tqspFC%-7}d zS)2?4VaQANhTL0}M^_5v48;ov6>${tSqAZfm%x7xDg6;{<##HD{n0Xz8Purw2xF_G)im_of@7sD67*?*TC|zG_-W*FUwv|mgH{wP*l7u>i~~wRZ3WZJ;_6C 
zlJW^t!!Pbs>ri{vooas$6I$GFO61rJw}DQ~n;+PGzCl_b$RAbo0cvo;ytl(7-c>ip z=goMf;l&rr@HQZ&FU=|hUPw|;AKjX#xzgM|HgHXH-+ZnaV`YamQ!>u12^Z}IpR3< zb4JQ!A2&#Ez;!96Q{p1Y;WZBOE-}3t*$u82(VXd5>OeUAQn>*ufd9bplSwi=QXP?} zysIY0foX1ng2T^VvQ8rRKoGCR`_8f!i{GG$KBtP+J8$dc08NgB3H>X}>``#7(ZmcBM?F@mb!SD7*KWV&`_sbYY-P%k^lqeb?<%YRnH*+LSVC<{;O z^zu6a;8S#FP^Lj6eN(S8tzdxtxXtWK!*PcD1}5Jmoqg14y&^Z#H*|g=(Dy@0++D{6 zPM5mkz}B{UIg&y1xy66l?I&v~kJDh<6$@bMX%WALOpHGP#DdB1J>>~d-5MOc;4>EByI*N2>6A?zwUyM*>lXk zQ>|Uu@WWkoY>T_dmrE`{NQeD!^Z~69PFZB&xiA;HYwKL`Bb!o2*ja+@EIYBm&t-zO zO5%P>mBS{4?5LW=mUdDeS-0wL0O(=g<2@Qan2n!m!f%1Cy@B}mk1hNSE|vw7*v>(J z8u9m4ccLr~vatJZuoU8JZTGO6X!Rxb@z&5eHT-^9Eyx&Xt<|y? z7`5X)FZ;eK@^*<`RONduwz=FX5<{}D&`ixu0_ zuejXPh8WLiB^WFlfNly|bB2JWc)FtZDZr>aO6C|N7)8Oj?YFp|o?JG3G=bBy@khNk z;-s>lojGmU)CB}URkG8X6^pm5)~oWNrf>O+e%lOUB5Lt&=RutD16& zJdneFsXRLnOq6gC=aei3WgYD-*Ar;A0R`H6XPILH=aiBRF4SW5iLI3=pvv7t8MF8m*Y3kPAKIkt(V2;&g>|tCS`N0;bHEbVuCTjg-;r z%v8A!_MEATyIRoKao%DD+25^J`+V6tMd>f%QxuM8iASq-JUYd<3tDkeyacAf&6b@i zMYIyd6`NXH#c9H7Brh}mDz6r6VVarUm51|Iz{C!U-K$jDyIn0_e2*y(Qzf=hqYN%Z z$wAJ{3tWXF?yV%O+JB( zpaV*02#;PQ*VumXq`|M#*!3>PS5FG9b}5X%*JZY6C#7?tfr1sZZX%+f8n!*yi{kq& z(DNSvd-qI}bXXcLb*61{nh$c8FjU=sInHyEM)MMOIv z2nvpW$OtlEb$OtDKf}xYdF8H24UU0z6;SZ|s@W5t&nR6Jx|6(TjrfilQ6$5OPo>svT=PBXtqKoZ2B({5l!sFSmEFKxOLS`sz zzNG`tz+y{mF@Z@GaIlUTFtxYCPmnz=o68|5o4(MeU$<9@rUnB!d0*Fow zn`2Eb=5$FSG|Wb^BlB}^@w;mFK2`2HZrF=}hLq3vSmjdOk#Wl&F)CpSo()zS7`tbIJ~^hpt7_XFMZ6H{VZ#-C{eZD_q-Jbk#7Q|D;$) zs*N%mTOTe%1+mby+Cx3C%;NNNGIb+uZ=3Y{N**v<1_bQ?f_kS)k zu>G0E`?nSMdyu6+v&hH2#;y?bVVEGE4xlQV@f@=PD&nJ+Yo=8>Qa6oY(n!IBn9|Lw z^>vhWUZss!Zl;mtd4Q&i-rIE3h$m{z^eP8^&5C8iv%hG+8C$bI-#qK-Wy2SDC$pYf z@|BC|xa+1a=-X%3bO6==s+W-bTaHxzPeApBn->4UEmbxX3c$sCRTU7rvOP!qPkTv4 zD(`ZQG!Q}M`o&_YHv%eIizOKGIm!K5+05Na|3w&tfP(1Hh+aVSp>D+_CbM(^Ze*%&C}(-ik{ z^iMY=TV&WvL2;P5xDl~Rm+dN}43p!xBPrpSQuS=0inl#&c;!u+AK#<}WC}N$Xvk@? 
z;e96iijDI$$eWk_w-ppiCCR?Ka!ydolZszVl9^XE9B+DMO$*8oZSnwGLrMb!&X1o` z_*v-M->asx77ylV6!)mdPf(b)0s7~EP{|b*pnr|NqzR?*O>HE-dK$&j8VMvm#J`wr zO1(w-fQd`hY8vIV;HB4-iB)b`a+>4rWs`4+^8AP_78v{*iKhmsbXxGV!0=K^E?A>W z;$X{YBsXi6rsvyL*z$!|&=Y<(z(~AS;-@K0b;)tM*!IEo?u)FRcU3+X zsFbq>MP=EbVnL;4c=7Sq?4TG>BwMfYesYrH}I3v%m^? zOp(~WnthSn10&46ORZKirmJ8~)3UUoD(oiisG0p`I;V0OoCa09+-0QEA9LK$s4~7D zbL_q}`+W=ls?EQ&*Z~?*4`_|{-{D>chk8}^NMR=u+^*} zZmFT)ZddU@{6ncyosA5A;G102*zGD{lD27ahiIuu3>T;al|FR&dQJ3pf!SY?&DuL{ z{5@MF?HD4hHKS3O8h^hNFd^Fo1FztBuiV4-DR-c_fue6S@!}|^2z~j((N+Vqf$}tc zyuctomzjM<`8?y8A}yn2@fs=MmqxpIL`*LBv1VN?H%h3_Ck=@|&w_%M)|6jZ*qK%x zvASxL#ywRKqPW{S0cZp}99*l!Qo2a5Qqp_MP2H5>u;b_|Mcr}^sX-D$>C1v+GL=LN zo?rGW?CEy9U+FGFOxlk%{)${LF4t*3o^4#73*?qlz}88kUA8eql*sb8FRbyh-kpKYDlte^z8_Il<)Tw2S2! zcW<}J8eTf(ZBCK-YFSv_vPzW`hGl-~Xt9~hhI0*exm>l})8eNSS&+?uRR6BVH`w9= z1Fz|F*=j@wXF?f${mJ%g-=_a;Yw9 zFysz}U@3*@_hd`&v>#UcqVmU_KTM#u0m5fJ0rsbz{Ok^~QJ2`>4K^9`;<&MVhQxCR zbIW!_9D4@!cSqCZ)LTq zbCgEB6>O8IO%a(ATcJ3UWM?ts-)j|zX;M8O*W#eFbne9)B+;%(VokfOZIqm?Rk1I} z7>|@7$FoBYpQQ4`ZE=#Sh`n(~7n^*6B5n^PF|A!uKZaD^PURmnv4bz!cWCTNxoY^Y z9zTDkR!?7QF}72u(+ke#SX;8GF0OCxosPe{<9D9|JH4OuK7HNrnJk&LH~FhNC?WmK z?vCCSz$8G^44@aIJ%Lc|?^atKy)&+xO1sRsj*_w7U9k*XF@B>^VlU7O$I;;z+nak= z{A=_tj{lXi!sb6k|C(t!Zj|(y{l6&dAA>ZPccoAm6)lK>Gri=F+Ko_a1}xecl9$eq z2Gfv|s_;L+%M4Iixg3~9iB={4XYL+0n)_x7JHs#m-Us zLJaJIB*V)-M}fEr72DYcI&8H(nC8RsP~Kb)>vkJ;AkLSQ3vLE2Gf=h@i%6b2Rvn}aC0ZU*PqtCjWs-_9HDB@K2fj(Ix4EhN@hr{m zvT-7MjpdaFpRcfy?JHpyZ-6|UYPb+64o*4roym&TH%XiHdkSb@mA-;93NzeBxxZe|cBm+AzQX^;{CJHKL{bY3 z=e2;p(VAc0qzCCZ7w3%>FT}8!N3llC$VdTQe5P{<*ZpawAVybTl9;*|-=qr%cSETv zjpS;)%TVEuVm!=M>^H#_+gols&U?Jde&Vq8Qh(N*qqz5jEb&ecV0P_Em!q3+)bSgQ zmhyhQevJEcBPPayv?L1sd$r-GW7T^0vI!N^S4Nm}*)oaWr-~n%*khLM;zeY~L8+{R z#D5wS5Hvlw3uei(R6J@9`YkU|+ zd+YoMt`ET^PO_~2F#H+w3Mi|WYqEWr?2KqGd6Xo!Q9Ok z*JAveLi~=);w{<60sLFSk1?jVR?Yq}ng^+Kn8s^@-LBaizzNr3l;2^Kk;=k0ddu?^ z-fV-wkb+#(M-K6#zhFG>Vg%`gR&#t9CA%8@EZpZ*14Eu|keusvcdjB!RSKlGYK`WGQC>0!w>Jmk 
zeH1C~UkVau;)ZUC-D%@pJKI`(&lWKXu#{9~B+%zj9oxII{VXioYI{m2KUkx6Y>ilF zd>czlAEnqwa(dKwwd@sFIbuR9M&5F~bc1;zh-qT0WM5Y~rB}hC-&+?+51xHAH~PaE zEtH~I{6=FlIa>vEIYi^vK{6FJc1;T&y*rb2%zmm=Fv`Y7R~uf2QMzw86?h^YfV-e5 zHiT!m5G0AcxAw%lEl^Jv4&C5h*rt&4NUyLT@w2Q)&S{8QVM@@go>e$U-_L}JPjP0EK`IqELy#Cj4JNn z5@Xx2)fh296<+$b;pc4{n`c69)h_Gg@3nBK)yoYMu7}zDd+mPMZnN1Iy@$&>U8vNr zZ7Z=yUHTon@6qB%F)Rk1SvcA+_G7pQDkjn&X6r}!=}%ht9a_COiL|Q&R=PqKx7Q`H zKW0kyhjzZWMVPt-Q`_EF?a6;wbMJsH?g+`YD-&iri|^Ad4B%r;*tO3Ce(fm!DX{e> zT?c1{c|6D7!dYj0k<0=NUCcUWO(&KjvZB9$R26)!SoReHr5V?OtU72_AK_ND3mQi_ z3t&c`uBVp`&wr1igjM4-;Fi5<&C^S!p2<3noW7(w=f8yq^MA{-3jaAg*qZ*o!h<8A z73K+n)?f&(5HlnUIM5X3qtB=+P=QXCNxeCcC;vYS{ssy_(YOLLRa6zC)=2|YW@<12 zPW5Jx*U(@UqJcsN3!o5yXA7cv;0q zF0O}Ds+9B)wt58R4@@3P3L>#+vP|BRae0jK1tI?ytqWWRE?lYj#RjOHtyP*9lx$xV zB$SuoKEU5*ce(*LjN61Hf_01ArP)JAE|Xel)zIM>Ff&M;RH^F`&Ih4ZhrXsE0qqWL z0r3%(t0C5At6tWLf1~H@)iN7~at0#gu`(DI^OYc-tx|FUG^c^Nx((F8BH39cbw`&& z$KR>>cwn-6jQ%LxAn^lqA9xr<52@5JO7r5S;JsL37_PB`ru7oXnKS5^Ab~;SbZ9&` z#To+;Tt2Ak*yB9YD?(+&VKhm$SIOg}FvV1a3Q2F*0%!>XE!jB=hu#IX1Ox|j=$ zOb^o_TS$W8;>j?~^=Fg-iXPH8#WRf3_%jOBmfgUXm*A0#Zo8hvioH>)_85qppW!)o zH|jB7_%>?Q=w;Mr%c?AGJiBwW?Zq#fK{Q2{#2uPlds}vA;9Wz(Mc-!5a>@Bz56V<& z74OJ)U)KB;^CD1jc9&-Vmtn(x%y5Y?QN+n=cYj#CV)6@3aXGD5{0^&veh&lUpor2% z(2#dgAgw6msS|4AjhYlRyEL)^Fj)GF($;&T?bM)Lgd16o!B{qJ-Aj&v4tm98C##i4 ze;YRcDX{l9qCYQ}NeF#t@UIzSgCkYQ;@}Yc?Z+D>aUiMRS%B{^b-(->EcnsdNJFum zDi-1DAbYE?EE4GW=b>LVO6(&8jMFG*;-Hxt9mKX%p-lUu<^mwhOieteNNh@tu6vyA ztvg*-abqhTnfo5?pe)6R*o>k0hyIMnnKjPDts=>vbIlNO7Z`I5LqGl=&-=(>S zSNW+NQo1=-CGp1h)5Qg*g#&KVNrtb5R+xyX!!@xVG)d8c2QHpbXtWg%s%(6lH5Ow4EnWy(#A^)FMKz;p;maI9?YCG$ zHk7kpix&ePf7m986M>o~0BOG#zf6>R)}hf`rIy2laA`&M>Y5!ZJ4P`^k=vn(vQ4T( zt5Y9sgxr`Si}wsDK>;PW!Knaro+8d~!|liDXG*|K@6_-d9CA>l&}ePi^&A5VxjCl zYWdLFmDeHqROcsfKf2lovKy-y_@}yuKYrmbVYJwd7)D0X2CD{GCRlBa#PH_rjVur5#l(l2R ze9tED$A5K-W4OduC}L^D93Q3O=ZtS}(KZanzOZ~s;oq-|dsNA8D)vWGPjpqMyHpRN z6D>(B)vcPVxlD+CnZ$RG$o+z_zg8 zWcRvkP;q{t@mC!mR@CGa~qaNOES+Eu3gWKJSE&M>69qg0a>T5c$cZpx1kB(Led87P5 
z+bdr+L_fLvzoS`wdDh+KW#7?)u)9lQy<_c7w3qLC35Nb`PZDn!R)1c6S9Aa9iocAK z>=!zr*_@uLWE9`j!F&r8T(h_FpqBjv)9VkeRXenuAL@$_yK%4W8VUM2CjZ$s;#P8i zeXk2rw^2OZ%IjN;E4PxxGK|TyYkXWyOx+5GuVOcfHk;6G6_p3xPBk`4Agrj!=CCa_ z6&8%7r7vK~EKn426G-XIzK-pms(ethh>hwiDSCL(7eJ&DhZgFm7>DAyD+7kk-YMO1mHC;2WoY}jKk}q%yRNo45qX=v8lO%kY>U%h#@K?3DNr$b>zPb(ZoZ!w2}BqQ|@uTXp_Zj#1UH}2Uhuv>nM zMh{OWCsc6;t1$(_-=aZEW-yJ362VgSqFuALR(FgtupJrbG0u|?%XT+9!N))NGHBlg zS(DRDePKAowC{a}^D2*7XlVg{NP^W4MuQdGZ2!Noj+X?yzZSHc|U!Uqha+hW~ zpTMvr2MGB#o>Qzu86St`{y4X#^S>CB4z~wYe|ydZH2KZSp9nh$pfyvg0rSGyYLJrY z_c>I-kBmw=q^DSBUj&-vM`(Tl74-~tO-iw&=tR}Ujl3?yMD35#TQxsfSe2(J*Gfj! z;N=sjK#^3(AhON|E2g*GJ>l!sT0OGM%`4xMMVp4MIYguKgBBy~X{{$aSY-6)7IOiHbCn>vmpC%t z-U6kdkKQrJny{T#jm%I#+TIdV3}}s1A;RPT{o zyIv{lVxocJeNV;j_gjAcnoQofY#eAT&fdhZnrw)JGzsbu;~`!MJzQj7Uk9AY|c5GJu!WU}T_Ax2hHmbe5 z(Z~qvj?l!4HrYP5ojs#8G8hOr!!YHmxa3j9_8!o7=O{sbGg;g_*1Ym=?#JC+985XL zrAY;0`0fTfl9P`h+RgC*Kcr@Y>`2>e#6L3lj~qYJFbJMeMz$Bi7?=w)1OOMuyE~M2 z{6rhjX)K;0Chyu|8X2Zp@x=#Z4G*S37Kh$#lIh zZ(iK$W#^h=8&wv!|H}|R4WQ~%g>1OL z$o#L&Pr7aK7ujy)=PB-hD?T12v77bEul^3kGfXA|?^k-Pp7^j1FM@oHkFv#wTj78n zoBzS~(?bN?$+Uj}keg8BL#`kG!4ASFEHlWz(1m+)mk`2sPi}WFsfn-99G|a{$1`bu z#Sp*5^WZhDmMv^QqVkU%0pl--FR7VmIMd=?8h(`6-5MsC^Hsyk$nvXR0Lck7lq(hS zp&{G0PRS2NkGoptFhMV_*$=4wN%%|VVm{CkfV+KMBOIf8$tw`B$8h0pgKLEMsr=3k zF%IJootz+~g-w61I z77@BK8^i5$lp3}_X!!XtwwN%6uCT`7mqT;CY>JO{SWUFq%AZy07^iHJ+-Pp2@>{nN zJCDIQY$1_QSu4c_xQib-WjYI~g{R4=GtEGd$sw#Lmb^ZW? 
zKz+X#$xYP(@w+Q1BpAOt1I%QDr-;fZlvp{g08*5Hg)O41e~Szj|MtC_@_$DLlm8SM zOqx*@87!OArIx6w^jYOvZ9%F7<1(t8R}>#Cz!J0yXM9}7>nU%E5NxT`8`2`W^`NXW zSuc6{dTCryE+ID{0TpHr6>Aub$a7Ud9P9l_bQzSwBzZ7~xa!Slu^kvKiD31U+9JZJ zCz!Y}4-$fhjnZI_;oO@*$5Y4~O4(hh3{o&Nz>g*_3|oGngn{dqm&*apW9}H2kgYwK zvx6ad`|$?@vzrJoqg_avKs9vZkvvQ?KcpbTGCW%n64?=rWQj5q7H?724S-$+NAX*j zp+^9hsKuq2+~L6fhgF&b+o~SnQLzYU7H*KBbez%JK%B=4jA3rYRIe&qga>~X<~(dc zKvk7U7DS8W*4~t8j_Qf7R5&?!?$oFq<}NW>r^bJ(*P&KOJjUd>pG>P9A5`lFSZFmB zL4D8gD1?-nx?ROlozHafV?*NPpx#=g5~xc7yK@yE0vgJN_JFzw$Etq1kRnv%WF)cH z0azMwTcP)HPwq3-dR*A-l_h+LS;_o)<8LGOcUyy8xqUnj;Epw$|HQp`qr6Z4{_=p{#DztJe%@r=oDhl#J((>G*7ykBXsxP4gSm2KL{ z&Q>UJOP@pu${k!PZG)B&3KLek_&bSpp^^C-9;1pEY7o0~6fdWER`kIf_dKRvvKKWQR=u z?3o$`{QT@bAn`ICCTr~w+dWwG^D(N>h258h!wx?O?6!|CR-$`A$)F_>o07OfW)8N} zn%LIDo$ssLSIB*7zKbJn(Ni*C4aOp08B*^^ogf>QItrkakpcPEJz2^hzTLfmTq*23i)${D-YR%#?brGa8ra zZY^F;Ubg*ow!LKy%l&CQ+U9dci-nz%cpku)V*X*9JfmXHXz+eKXEZ-i<%etRhpiOW z%1u7gCKZpNOF!qPm&{giK^Qsn6e5$|T12EsL6h8}FHy+Gx!U39w~AkMNTT91%(yiz z;1t=q!)BnT_)hL655U+Ns}f&Z-Q}(Z;9KmDF`Sz?Wna>~Ugx_S;w~ve;kgThkP zOnHO6LT^`vF^c0=s`F2yd@Rh&rxnp_N$e|CeC`IFZ|bHSs~6`e)bI_)55th@wd&dT zZF?*rZ+uO3;Kj_Vs_SElK5u%_&lrEsWQj>N2a;lv%(u1h5VUO5thT)vGn*bfuAUtY z<(Ev*gS_tmyViGm;epJDy&!lU6nB`$|K#$$^&o!7Fku>B)8cNeir;aG9j4h3KIK;& zaiS{mrxmuO8```-DHpeR*>;xrs6}EAlE1k-`h7dUK^LDfJjx?gtotyCiAeF|+ch@W zMxOa|ap$o@fr2W1pai7hc#3t~R~h7X*+z9wRQ>V*R!=`?5I+4WmH5ebcgVvCcDEK` zXoN3d7M^DY*>{9XmpM_b>K0LcQlD$~r1yLLk6oh8m&7WW2H#6ciup)iwpWF@jq}OC3WAHZFg&l`u3&j^^h0G)j-Op=BiT zpekA4U<5aZ`0g0hFTUFmq+L{&R?~T#Pt&PRR@YLgc3mKrbxT}>xZ6i(^Ay^JvZ;9W zGl?b~g1(B-HF(jD=56RZjOh?!7nGYedo6xQoieh5_#9X?&D%A)OS%pAfXCjF9qce~ zbP!=~LT`};nUpucSbxEESN33hf%3|0G0T3+e9TKRv6UL2Xv>pSk&Ths2OhLsnoit@QtlKXUN z_b`@sT`CP^RazG0e{Td$S8>lWX`lc$wPdeS=&ZLObqUEFrt*|!Qcp=`9I)$9^_jfx zCa^1nAqOfO3Y(W}tKO~4$bwqWv}qYR`mxqgQXx&AJOkDC z3?=xDPs^%)73}I~6{$CluaNDn)jnFr=clVF{uN|6w6CPP4F!P*1bbSg&Q~S9R?CAW zc|+GjfMm^uGKQ*OtcUPiC0mo^@mVv3X^D9fDzd$hf^$1&O>*s=qfnBKT|rK5tm>j; 
zQjGFwp^Qf>5_#%AI+VukkZ5~DF5Ip;dxKJ^Kpb^y;ttG@Y$P(Zjp~J_4n|F&|96oo z!;JsPpxD?@Sk9(mlFwGzV&e#ZDdiu!8ZAIPzo1;P`Q*pyPpK_Ql);R`NbeeUUmiWA zQt(xbqHA4kkT84>$Gv2dydY8n!HrLIJUZ_LUEQGt z0y%mBxbqYTfSXe;-`nI7hPERxP4mxnFWkfM^Y@LQd=3Dyt;#o0)+_;s)kx>VmQ|n= z*!OKfU0)jG)j%S~+d@M?gFXvx0&%7fseVLJ$6qp%iu>!l&+^d&0sdG+Y4(NAg#;}$ zU^&-G&Vjm@{Ztp5C7AFA+eNO;S84+mg_{Z3ZHF^2Jk#={Gp%~`Fa?$y*>H{jOsCLL zJ^xu1eHyNL#a9i%blJ`n@^@#;oyzfemL-WHKwk>L=jY;OCocKvfcHVM*x-~u8>Hi^ z9X4S+{#+M7fRgzh8T;KD|0|5lvn_F{iOo*Ie~vMQIr()`L3IqlZ|eLSgYDnqU=S=e zyl{acuBb`k6iyDkF3l@{sRz-y&<=0QRX$bmk-?i5Icb8pPM5$zbfWNirJi1DR>8LI zRQ{J*kQ}Ks8YNDVJ_kgcL6!`?`m1WBD)=p|4ydOX#(lOI0{U}Wk=jWiPcYJTj>Nyz z0|1FtyVrJ~wEPq#vB{x{@n41u3)XCrZO^t(RI3(#rx2gF$l_+*9G`{FQ@EG*P>@Bu zXHc=N8+9+DysLfndhFZO{oy?}|8Xlh&8~(R&cYk8$LW?hL6yb*gueIKK87r0e}I{5 z;(hZJ?*2ZnEW|Ia8bqw%e^0Zz6+nuv=>o9jtZtBpWLu zpjf7hrfWBn<0}9mQBrNEw+kj_@-=+QTx;M=cBwFj6$6BumzNN!2jj5rk}$G zxC?NV#fuF0psK=HP<+AgjBzWb4$ksMtzIa)wLz+yz!QFP84z~FE3wGnn_EUa?wAVB z9kyShiPFnf$#$sVknP&N97YrD9kE@z)ri|QzGKZ#Z@2B+YyzKl=)?!qDk_ugt+UJI z?o!d+`z=99(o^aEB=~<3Q!F%k19Dsk)u( zm6yokQCGGvk=Yw^BRURU)T)WYJ1{#|5I)t8Q@Nybg|WX0Is}_ZP5hje+mEq`YmQUJ zWVw}%DDwN;`3aU!Nt|P$xw9xSy`0?dhuIXLWXq-6=*`r-R#zr!e z^CN6tnWGFG$FFV?3r1rA-bf}3HR!td25#X-hD#=puT#cIywk2?W9PS0BB_YRLVtB= z7Pk4hqy6{@Tbw(ZhWjECc|kN=CGvyzTts>5KBgi2aSNeQ^iqu*q*7fHMRQAv0k6n9+MJQ;TOXsaiC zW-NWLDeJO>c#dM$o*65~sBPi}%7ldR)@e)PP>lk}=KYk(o&%)Ze`K;(m^1ZE01>$8 zO!7Nb!M*KAoLIA{fYldUfT*MQsmf7B`RQofe#MgN@D5-t|)8wd1 zW<9;6S~fU+OTh47Gpzp2T^0UwSU};`OrkuFSm-VyBGDeJbK$gYwl;;$8 zl{}8@pe_CxSjy2zf;?pwodmg^g$hXQGI=nCq8%!6xY?bprz%|2DLbK_%~dGy2y@yi z-jw~ksnqc|`PhJNUNROVnNWrdqW<`XaRpZgyS&r-oSA=XtQ#PP*{MznN++X z_dtM-D^g#z-!|B#kb>AJrH}eeHY7Vxao>%y35uW$bWF%UQW|+}x=yQc1Y{@VG3d6h zq%5@J*AN}#9~lYKQqYNV<1V9bgoM>fzv&SETdA~wi}I02T)$qi3W(DRc?H9wyZN-@ zMZebg3h>E&s^uJJdsL6OX;x`dbT`E+S=??*kSBN!s>OV$?Ilo*%1Ay8<%lN320P5QAr)wc%VkVQKrlv$S@B=;Hf8Oa>|>p94D`lt)X6@tN6%I# z*^hakFuzP8LcwQ!c@FG^Ao0SRbg>;w(0p6VOP@FWXpwB6Ut_Cf%8%8dx9Ee7C#jYH 
ztL)IK2U-ab*_x^c^ykq&=#_nNd$(%1JzBMzTq3)v>4zt%LH@c-MOw}V%s6d9akgSs zTVC;wL!@Y5D%(#fBRlK@)Og2$@_-F~oazRZ7^#<&DbdcE3*x2hvNo*_QD>9qkvytI z^pLph9fH|Le2OKe8WLY)1o;W7F_6dC>-=U0=p4zdbtsK^CHnn0kWH8|gdbNs?o3;3 z))jFxqZ&?It6siIr!;KJ(Z);uvBOVw$$vBAill`D=>6#!0dH4k`gI`A+co}Djg_r1 z1sWj^XxGGB2JZb-m(l9aA3N2wj4n5}B=tv+SeRFSxr-4{UX-)7>gPvN+5|hYDtOUc zC36z$Fit@>{dx0rm%rcQM-Py0txRh7epQ^`3iR96u;`L5#CK{o8gXiRA^3#yee3Nw zH2z_Y|FE_4)BL=H79GSdwsOq9aSBMl>WRlGwF877`}CPTcC6Z;MC~?zL#FhcJ2_rm zvL0P1#4WZgPHDsWk}Dj352o{zN)Vo{*iMV)0WkddSeORC<=7$oqQfhmHG^!mEQw7J zh2j2;7Ws%NhQzX(eGWA0Nftk&%?H7;M!SwgR7oZU_O$24IIGVGOE=fN)i)+iK;^A%(Y?xTyD^pR#w2jp)9S-V`~ zrHO%V)mrQB+|lBPro?tN?AtYLd-1|5+mlV|qzGX;S0OrivlF`ifeP_xB2ARV7XUUW z8#Y#LWapY*dKg5XuCs-e0Sc>&GB3k(O?E=pNCQ8==7+l*$u+Ro_)ku%}Xva z{c=YWvXSC^d6~}P|8PK!EYl-D)LsNW?NnJ|q_O(6%#7={c$X+MonVR<`&CiD_IF3J zp=Q$ZKCsb>5mKVh&Wz-#UtBF2tU`fLRru8VZ}mUZf2*e+|8xD%FM0GI6Q^==#?a^c zM(QZh*YhZqYK!-YR^as(;nI;cfy;mt%cLH(4Q9zwX(*4jcxC(q3L3U67~C9RH0fJc zut{6;Gh~=Pcpb8VRw((wI3`CPmEmk|kc@$%xmxZ?D0c}{GTbOxk%Ap_+K0@HMRISk z#UvX^u7-^^L~bE{e4gTxetDVf#gpZFGO{2rrMx7{gQ3z2NoC^{nLK4{buhx7?$S!j zTa)DeJUxK|1kWgR;{n7U-CsywE5tpP1bF%<(R@~g+k%Obdp>xsRF}6VsKgKPlB_4rLi~V{*X#G-K%2Y_2${g!b7f6P0+zC z8d4~NX*o+^rmxqEjywJ6dm; zL*~8-^Kz~UWQp!ahuGp^&F+gL@D#0^q9mO#f`t?TLi6?dh)3`@ z8X7{AZpF_|P+=81!*Vyep08B+QUX^K)^sCkntVxBd#MPkY|VSE>N6iLFjMB37*A2L z76W3E!CN%8q^evPK+S_m&N885r!)Fq^iwz^^P@nmqk9>ZAX*4rAXD6+o;^i`RYL;= zP*Y)VIpj7<@i5`A>?e!lAe+IA{vtiW;+N5<08MtHOUCjExRGI}TIZ0RV-liVZn(pS zSOi_UOeg$+=sk>!`>o1pX7**2x*g$lh0wn(RNA1a@C|tSXOudGy**K;7B2@(Cnvb< zR!&13yR6rPbZ(9O>l6&kmQ`tQ;x_=S{AmS}vO)3ggjN^KH$|1}v}h!*OQz&T$~3;! 
zcAJV=Wk3_{O@`;!g$L>tw~zy(S}Nk)W_#H!nh#13(lJyt@-~~#tNAe=Y(Z}Fb5y05{)C*UNT|;$h_>8u{M&*LAY>tDrrf zZP6Ok-Zm(dJ8ci5j;3He-%O#F9yGZdF_s9^45J$~hP3DHgVOfX&2o(SzBylWC1FoAFaF?^RuCR8ZC!9kIPCCbdeO^42NY zGv8k2zZPPFf+@ewv3juWep46AyYV2KaET(n)a1)s{B*4$7K4ZLd2Q~=s@NHCeFJ^X zdK*=IhmGXMerV8gmZI3psTTN#zAY>~cM;B2+)HKp9(53Xg^D->n79Xlv!!S}0_4%0g zYAs7#ErfEW8vJb6PY)9H^kdzgN@bUhws}0-F9C1!+GrKsI}QF;Yf!vhEo*^5-Ob?N zW#R-bv4=eB`R!DGCfN2#ON`RcYvHlB7gKQISX;boqNBr=u2)`d&}sm&TPN*-BW}?B z92Q`Br?#bGe_h->3c$SFv7T4ZmHX@bh8FQIhWN?t=;~N;7s=u;WNK`vy1MT3&qj&) zF3yR5qWrhjHHmj>_405*jMKbb@R^zqI}6WGk9ATzKcHJYt^8xvb2f(4VuMpX+5QHt z&a2&~xgok@mMtFX0w8|m@H;xh5gn5Ft<9YG3@_`o{P@ip`=kZ-Reu3N&u<6sPcOK*(%YBd|Qi0 z$g5iSH|xoN1*!1gmiU$b3{s!9gynw%sWQ!~jHwJ$y(J&Xm@1S1TTLOS&|OL86iUDj zn1hYr6vZ|uPjDz~Qc5;{9LUiegp}W18KiyWU!pp?8Sp19Dn$c=uF|Jsz{*fW9;_mg zr@RH&g209F1?NM64@6YYJYOXb6*<+X0_{LGK@Ae;+1)9{)1vB=%77@`8(SrllZ#KP zK7c$FL3&%=paJ?Y3Vj}-*h-~j&J0=}wmyNARMqqtqYk7QWpnQd?}e`kY`?ER+Y5=K5;oez*jW76D}_TP8b88A?L>d|EsKN1d(s zROUTtj+9pVu0|LsUYDyT%N2V zER|QkJN-LkC1vIKu#K0Hdcg_qt=7=lml~x0zpFu~Vr(0A^f^TrzN?XM4)%9FKc3>D z6H9scQiBn?fTbv{KWScydmU$rZ&UdDDafJ(*Dax%x1v;dTEdfn@kTiCDKp3~!7NSA zHu*vJxUhMP9aL2|l3#0pRnlwr5Umn!gc9>&D8dSf6ss>RK)LB0g-g1P`=SpmhKlP0 zBC(6zhyV~6M}E0MhpA%GHa}kVagRZKyjpF0cq~3wVO5x>YQQ!8^4D6uIL@ldHUX`} z`&CMKWg)qn`l6JU%|m6&I-BH>>m-{d)qIQmg5nsGNw2TGe zE_A@#&~s20D{Dczi(&TUn(l6mCxbW}cB;&ud8QY~HL;7K*bk~Kg<`+FssthSmZ(ey zdV7>DZV=?5tLF<8`ysV2$)QCHpkVQd&TiM})sMEl1Y|+$7jg_#C$G8tGrx!xaUKN- zAF+6s?U$pqDqW3I-ezjFg6$#7uR@27v`}G<02*ErYjEqudk%-VFIqAA7f}v!U)ubP z3IK4EA{C74)lqTY*($9cqYAqYW(x?LGV6luHVPuXSo3i0AUe-%G(bvTxx8AnP%kmz z>6(;Oe<*a!C%Qe$CX{Z|s;UmzRR(*}be>kaaSAb6mfT$pDu!8G&`N6*o7G1q%|>}6 zb31H59X6^j^iX8sM&@O|*2E)LRc{B#*6WfjWWokNuN6cRAE)w97(Yv4QymP}M){EC zhuc^jgLMc-Iga<^8IlXbBzvJ1S9s9!a!eyk;~>UOI}cji5|l`8-^83ZW!jeXs-8)8KMD9L9OF{>R2KGw39NjpZ(=2u#HUh|8sRZ37K2Z&iY--t6oX7d!<=TX;PO>QNO z#mTZxEptsT{gESvDTBF6ajI>tdN9iPGp1N1GY51SFPA8o$Y!flW@HqBF&gYAxnD-7ZK62S_cV4_U!URt?WUJ6o;fVRm=7A+8Btr~lZ)~(!T 
z*I+=sAxpNe0-$OenTw++&+nV{sw{*FfuM_T;AvlJrH1#q`Gb~Uba1hbK1~tT_;1_A zWlq(X>=L=g>hya=$s+$lU&?|CDgLO6r!cg>-)^A)ZZDa{A2mSqB6So3*_ZVf;jV)*5bkJg8i4VJ} zyZ>6}zw7b~8nc(lW(}JgXl?TgVoaot>oJ5 z8zW9^qioe`70sKNmk@-7u*E$pux%^LU13(HSbjUF!HE`_p5jX%IG|g#?{!hyjSpd6 zM~|}m)JFc6!P_)3i2?>&l5l^c!Cdo0Anj7gXe!gU#%c07En6z7Q_SwEWfKpjdDr#yS!FcRCuX{^bCppwS?((+%PE{uSzEn?VR&zSY5peQ1F%J3 z1x~B->;>TC+KMaey($Tqz5zXWsXSQF2Rm^v717I@OK9~reVjy`r4Wr>EYtTG2cu#M zZl!99#wo-bLAp+=j1uq|xsqOk-u{C+Q~*zmzATwG5+s3!B!7Q5cJ?rx2g`Dz+?&&< z1BaBpgep2v7Q`f38lvzfEM1{d2^{eqjTQinY)+O3lP%fmwi787B5nyU)Y%e!h*I9I zZmJC>gTrM#$gYsBzBHlQSk3uLkR&7PcKXtEzCt!^dLq?Iw!104H+|C}n>I+>HM1va z*GQ~RhUqj6WX<&ggh~G6!9C)>Pylk z)e6n((;s{5Rj#|2L8n|QgA8ufEazS;D9={fMD-<+TQuGZA$+z{PnPSXAy!aPEI$Ox z;E{n-c?*6}G-IjyK8M^?Ggq?_a?sJ2Wlr*2A+p6t*9wt8|YWyUwk=gfgDZi6xdM^&X$8rgEm zT@H=6FRP==Nj0rhym&i{R!Y{x)mz;6d>6w{w^7-n7P~@rK4FxZmOZ14m~dfpr9w;T zpC#W`hi0JRVT&qL`QZ&Z`EsD>t2U~3v&O-2J6xlqwpHB@jc={i^AlBa>%(lDt%|=G zGHeKI6&-Sp!mlzwnN$K|mU>EK};WO zX~K-);E=CFq)wD+TW9Xtk=eeI@M?;|O#u+s13(zI?**O#EK@2`zEHN_kZ}mbONR|H z18kH|k}c;R)r&gY#lx<|X6UpaJH+4%Fcw>bay>2@hTFDzsf2D^P!osR)IeTaTB_1C zX{h1xeQ5M`u)r4@qNAM}K-j)2c58v8zgD;JQTv-EtcLI&wW{F~FQl6<=bC(s>SHjn z=WDRm20~iJ3cHSq_#{gyXRW)uQSkF>e7Y`m@mM5&E$^-|E=>XBuns2hZ+9~`rR;4U)s26%Xvr?SyPf!&{`W9fdwubM~YRO(8*_UKYP~*Afl{B`Y98?0HA-^B2Kleo^Zs_JJ)v)Q_;X{5&j+BZb7yb1BsN3*9|Z^K)7<{!NON{sK0r;fUuIiGA6E z4(~yW+qB4xI#RRgcPh%m@7BZ`S`_lA%C8tBCT~}jF{n-M;qyH43teI-+V|c5SA{{Kre1(WLR!XaSIGgv7@PjoWd-e(^`?! 
zb^oZwc`;>=F9G^gDNt3?Q&k>{0(2L%j_p@$sc5k5A2mHk^2I02ufCpKm7|(b6#?yi zYDwj60)L(HMaRr7iRn+%S+M87ei}JT|NqIU^?!Sy{y7HMdi~fRqpfo*Kxj{qg>Lv*wIy zMH($BUMN!ljlQIemcFzM6;5Bp1GCnnN%BY)0?NE9U<4nnfDR~4)at9g)LcUAQQ~j3 ziAoYC^zCL0xResInpeo=J)n%NcoU$nxfy20t}=T^?k?VuX%%=mTOG=hcW5^XSHC0E z@`O4rW4|MhkK$rWU4!^}B?xgcG?g0(A*Vo8J)*MZk~J(J5G74{%AFd0`(`_}H-e#G z8R7JS9)OQ{Se1roEumS96t`0&+M~5G48Wi4uY1V`%C*PIE~8Wx$~=iSRHga?@*s7m zum|EF8x+W~uQtf=pkpS>l&>Rh(s4^k{xk%12Plaa9a9;?1SWk76A~(XD1UQ3O5X*q7YDGa;>Uxu zez7%3rWh7%Msg{Iw`%obf=mnU(+5@hHj|&g0!Txp`nU(0z&3udS!b74DNtxA7FO}b zkyQ2vRsOC+%ftkgYs$8@C{vj#$cl$7Y{Q=arKn;aR~l@xTwC1YXR%VPs9#Ok`xk@X zrUmIdrCN^a+^<%(ow6>QZExB0l)fyziSg}SP_9zPxO0Xpcf}`q5bdi|@nf6fPgGkOK)%wm|X9 z^G!clps;7!=p~_nCQ>p}W6!qH*uPn)@+q{6pu0L_c(cxbV2PhsrLWp?y|y5JjLIKA z?()Mm^6s!3b%p_RgI=XkN84E}9Qpq9kv63o1{u{%nrfH>A(-B6a6kP8t4AH0Jy#iN zDorZZDS~#ZQGTt34D`uzeJCt7o%28ql6`b@$FR&t+iqL+5vO!CAs^*Gh7)ALJGj+U z#)oAI11Eb!7P}a-^>aZ9b5Rpq{aX%2ZLkx%s>)M=N-^AIk-;vj(sp9j!OK?iC}cJ2 zqltze|CV!4W$m`@sr^xWZcTi%0}p)+RqKfFQOS4XbYKn}43AW7-Q|XgxQwFnPb*Xw zC@e3R`3>FVH7f{5N-D6ZS4D^+)2z3E@8u#{T-QyBiEQUidp&M69&974uUa#3R6D&G znokQbD9s2&8Fbv<3;2C8kaoH;`Z z8vOiVjm)}iWeo<|d{cb8Ln0^0_*Szg-o|oUZE-l4*xVX*N8H?~^Rs)z6mViXij!L= zmvnxc&nLoCr1eWO<5mYnG0;w*8F*dh-_gX;9^S%nl^wIf^vVf_xSrz8v~0In0UPkq zHa<>jK;Ew ziBW9Z##?OhdrxA=5)joS-F?~=dyJBZttZQlM1{k|EnF6J6%)?`m`rMZnbgF7+gMP` zC%3tmxA@UBifFej{5~5J=Pg6rQhqP{_lcXF8z& zlT?03wU|OIY!EfYVLqpAgqkK?T$PHNaM2g>yOgs;^$w~!P*n+Z@)T9Br7G|?lH^1V zswYRhNGtwNY0d>za#VHr^y{i`B+sBAU`JI(>K}fp_IGFeD>cRc=5I=h|3yvlf2O7= zuSyvV{R-u7)uE)TkCi^KrC=3Rcs)V2`VtCWRs|gl z=&KD9#l#lL10_{asG^=Hs%)MzQfD+rhooqaq0;&i0#_=;ULLB|OW@if*?AV)=@Wf0 zjXSg|>wAM#o2|O<8Gc3!J78VHmfh88r4YwN+Sa@{`a=~nJ)H# zKK~^n4qd5KFj$n&rYKYR znU4OU#QPxbgH|snQrKtti94dAsrJr#7#O!{)tahsgCWpIjirN`r@_t7h-Z|!>By;T#BSrV=HB1Q#;kduxw)6pXug@4!fu>ha;4vr(%3HzR8z+|39Q z&$ehuo|oOJQ5EzcIhWjkXg~*$Ati~IOf>w6zTsw9tDpDR?MCt|=B3-Ke)O&(3ey6h zA1e5jtzwZ(Y;H%&9%(2K*I4arSKP}bdx1$`5eC+06XYPd(va-?)vB`m!|hZ%Co?I) 
zYd6xVhWm~|eob~RITz@N=mgbIj^#o6C9`_cF)Af|9WaVY`O*$6Tt2^z@;KPeRUy-{ zY83*`D4l3|Riwkq-Z#jSpp`530;Q3C=y>@{rf8+rVS42T&11zY5=DR^gW^y?Wc~w- zis{V9X-ks^PXxI$PZ1}zLn8f`gB`xEdsU75RWegrXb&8*5imPS6ANXVwq7u4dAYcw z14cbIuQ0^-ZM>mYtJ>UJt?C1Tqi3s)!si}lySTok%5P~Dr^1lQOd*8COr1)hWgVIq z9m8qK6F>#u1}%Q~4^Ha%OZ1x&PMv?$!j5jI9g_H2li$e1JX5hTK&T|h zWSKwi`SB?(U>f(CY|ts;uPR_gIB>Q?L+*?=**=EWBRH<+4w+&j6}DT85$0&%aFh^t|u0Pt?LtMK3RHL`eCDDFC4+-FrekeeBYft5aOZmBp5gOwKk ztsp1(eN`bB?6KFVY8`MU>+F8Dk&d+}8CCqnAs094o^%uASxroB(3)R53#9!LQaMDV zw!24*`$3CeEZ3RRO#^i+i%)UHAvPw+*BRc(?k&8{=1o()(P@rP(tIQ1$90KTTe6=r ztM5!slW0LIZ-rg4`CVvYEOJ;I}OcP7h;lO zHee07ugT&#i9pM)?rWA%o!4hDv9Gc?ORgH&#dBW)k3-dQ`+Dk5VYm1 zFw{7$s=>MaQzJFcpQ?_VS+Um^YNADZQy26lv!1$cORVoJBdh#|DNAZg^7-_S|0NTu z{I`$FME^`wU%3A%5em;h)xe`<`Ur~*(WXhY&X-C(AgOWmJz@G>uvuhP$gyU7R+1D($#DB73 zxi=%FO&_Bn;7TG&qFqW$=(eoT8=#s_RYWk0tD5M2A$_4Ht9&J}C*uv(XZ`>$F6=Cd zJTE2h8GUz!)(250ZA95PR3?Fz6BaqB2Pjj0D1c(Zm{3crlAjK(UxDa=*|)U{`rbgU z^20PL1v?m$AE#PWuXS$noDh5r(1N&1dc1k7l%ft-A`KE!EHoX1Su-*v= z7s;l(Kcj^5Bq~alQQ>)VqfU|ORav97iAfg6N{v49QI8Tz z!|zU}NiHq5o+gPHWRubNa6oW=yq3m>O?i7+YVrjpxn4ngG=L#-_hpo>u|wG0#v(3( ze%}Z!L?t!etm2;_ZU6|4JPbA5OHL_9Z^=H0jO|`!=F>v2m}<)(uF)4E8u?7y)pe?% z%6{aGKnQ=a=BITopw4lInKtDEMHnsNts2$e`k8POQ`})oVu%uye+D!CQdt}*WV=

ZnHHQ~3*BBfZpJc@tP?WgA|^-^@O zQ`JUxDn#H53IJYW2X)Y6{FU(?JTd5E`t@5?F!hp}7d>I|uNrlXVRBjKr>Okzf-1cS z>5-aM`zsUg$THv4sb>>w43@#U3Yh`?1to~S%`o}$$*o@eSDmW+v6*!mQlmBgb6uR~ zNp>eyxohOt82r1UTKsib2USbK1qWRUHCOf09BdwA`!NhYDstrfmqD4@Yn`e#7J1L( z>z*0d*yCMwbXMz(C#Or_&KgPp`ETbR^>BV=tDlDy~C=uy>Ny@pH--b zw`&e$TijMv0J#Gys;`U8L0BT-T|WWyw$rK$wS|cPWXys~#*o#Pawd-QC61A>d|OJK z|78H1Slp~jZ1+x@?l4fjaGK*Mi)!>Wfc`Z4vB94nE4BrKr`zDEBXV}s7+E1fK1zcQ zEU#+yiW_zQtZV;`RjW;@)`Z?7e$tTY@hD+TCzwj(a8Adc}gtF*65%d+R2scUDNjA6)s;M@aKE(iG2Wx&Pj#tSwAEG>nmQYIbI&f7^ z1(C+8)jFqjQa3PeAg2QCc|?_VF?#yaxJsayQ7z#HY&&xmhX`mcn zloI!y6+=`v0i-lpu3}|#s6bt%gdMa{UMOT(oowLM@<=M=WwP{!4vRAg?^Dgb5_mR{ z(Pt$>`jFZB>btzA&8l)7@GREoqj>Mg>_-N>QYK)7;P9heWNZ;oprO1&3zB!_p6>EWD%(`8 z@Yn(5kToB-Jhb`YU0QFLbZO+(&zjq-?j71ly78%&{k&2&dGWd6zVvyhm=|ajjD>rl zB2}=JZ8U6s?&6207w)f9jT3t|Mg6)^CEB~I>Z>f;&M6FYagE_#Td(neL&w19IMiBPCggV2SMDXN%pN zs5Wb8!cbmch426=y{_nMYxHL+G0G&FV?-}9XsKcP;}-sm$&Xiqd^Jqe6i^DL^^3QR zAUYJ9^rym$X6SgI>@z_{A(t_JfS?tjY}ld2B6(BcQH`%M*n2H>t%EcU42hmn*r(P0 zTdDZjkI6uIn#zUF#$aA#5T{Ul*P;ZZn97=hrIY_^f!PlGCN}vY1hAk^$o{7x*khBj zwu59C#zFM`cJw7B=22OA+j&OufX_Zhy52`^4Ro;VbT2r;al-PdE9;G#sJ8kjpj<$MQEe)&9wrVH4 zRrQZQwb3Kv29o<#@y9NS-ENbHd9DC-`D?(xD;=EkA<*9_PDMHD#ZwB! 
z-Y;!%OZXZ1GNbzC@y8?I^$Y=^hiW!mpo+djT{s_&qADff4LW^ElFBm9tIDTzwbQ`C zB`JQ_2v4@EulN*)YGQpib>yjS93Z?mfbd6-MUTZhZLVYPJfk@0D9*d1c-bnB>B6{s z&a^MBvZ5(#i%*iN4BLoG&a!Znd~fgN?@C+#>%Hr-L;gQdO9KQH0000804;t4Ntcv5 zJ4gTk09XJ303HAw0CHt;Z!c+XWNBk%b1!LbWMz0QGSt0wP+iUTHi!ie?(TYU0tXKe zAh^5R!QGt%cXxLUPH+MTcXxLPuEF_u@BF5w?mIL0zEd^-e1Gh&?%uo3Uj5WMy`EmZ z_G)Gh=1(lFEc*I9swxN&(582J>VI7>?nn@jFt?Bp5dZqU`d`_U%)gb>Gt*NnEl*PP z6SYq=wBl3aHR1;$U^Z~~PLpw$;#*F`CT;z`jXmLM;Ju1|MuLOJQt@wohU299VZ7Jg%v@LQdz*sc#R60vXiK$!T_VP*t6IZ z_T-_@GfZp<-x?bSbP1xNFjx~S@}N#@u}|V>trsq?!9GaAp)RQ_-b=(WvxjsM#jYhJ zIK(ITo#yb}87GBmALoJ~J(Um-pAbJCpFjhTqkf=-;F!7uyZ+nZwhSG2k|GI-2K9qj z8NF><84rD*?SoiL7`1211AUo}Tva#uWjAD174yGfhs73k7tZ1R8}59}#|W`-5D+!* zs`6iPH~LSwk7?*z6G`Cbq-C|ObntEPM9q+!ng2NB?QB4@Yq86 zY&zckh}d$(J(HKDBV&ZZR-GhU{f>s(7d?!GN`?){BUAhkMs_)mcp0byh59vVS%Lgp zETVTkpVRu&!ERpY$@=&?_r|C<-}Tri_r`jdP$CLSwoT=->57898nb>G=WySHNQeLC z=B&6c81L2Z|Da>$@atEroNj|# zbs6Sy5F1-rkU_ z`(QnxJ)r(-o_D4i#oxo*bPqMoEErbYF}LUMV;gV?+YyUq6uBIN=*w;Y0ZmJ3P;CJL!1+#p9|75+lV+ z*d}W;{k#;C1h=r=XykLfFu}@*(!g8za68Alyh6&O?c30HNlg5Q?G0A2NwP4oJpRjf zs_8ErrOCYcN=!aqs3oIlJ_ggLbHAT}Uge<>AcbQE-LkDTzXd#Zd*zDIuhP zCz7odNk~Poq{*F}4Yj~v6b3|TNup`+*2-9Fex9L`)HdrD|H17V*D#Xd)`X#chw(ak zx#KfKHLhnXSpgpL(O=0O>VhuT49^)TY&iI|nI~slBDsiO z&meOmfpLMxAAfzH#VyhZ;dS(Sz&eI2RrI-2%|b=pM!(bdG+g#N>NY>qufJ-3p3$^b zsVq^XPs+H+M6}UTmtJ*Po^MpYLftStDYQ8;-yog>o3R2Ya<;wbr2#AWqewTcrDXxXYY#H!HLo=hsE~7Aqmof4rsc`ViSVA_QI; zu2F?SkU@;!`$B?2#XLwYhoy-&CDj6!v{#5_6COw0<|Msbj&CrDf9tOl8ZAZytUxR# zez)ndKD}l5`Wkr8VF-f*(Us$pcLs6UhURQI7(|b|&FnoIG@uLI1AE{Ket=U_6L|sf z^vCZ2JR+%`0Gb9v7Ri~0Lln}E08P+P(AF6=b!zE?7fOjVr1j6R4fIs*EyJ=9IR59@ zkO2XmTl=w|kElX>TRf=xR~Hc^ofH)b%-3xr8KAraMJ`@8g~dL~4G41NFa^AbZDiKn zNM?*Mfu-%TZlM4xShLY*7N=Hl_wP;F0Z$$o*NL;(jZHzAZ%8Z2>_&Px-bV1X(11v? 
zE3j}NePAq_%kUN4kgmmk5BiXY*Rm2;9r250uZ!_2X*ZF85L%eQ{$bIs?QhcY|Owh6S3YX_}7ybkP3( z(IT|ANXa~eQ&p?wMHH!L`fzTOa;K!$vOynV0fI(ei+j{a9Z!qEVEsd$|Jjn!TAi$R z?#v2Z+9s+r_eY9bq|o~PKFcB=sx!<!tokhTuzO`gP~fi~Y)RJp>nLjLa_@HTc$L^zhfKxrs5LaAHl{HRSadkFm7ual4dU z;5W@3(BU!3Kn-O^H)oXTX?cOP%oCJzvw~3FzCS>Zo4NcG)53drqqOdi+MhptvH#3d zG5~9#$lDb8J-SHN&&S5@p>Ysx8T=+4)|xg}RTVTs&fO;5b|^`^J40Oe+sU)u49`k2 z{b5OdlhpM{6raB-As#GfSaj+%TQ4NObu(W-Rtl+g5*i6wRW9hWtUkDUR!4mV)()T{ zw6|Z}q6=iNJ>EBoJKy|K^uAi6cgv9_&d8K1ECJD$q-Q{X;oV(R1^T}(O?2LV?g9}G z|DFqeNqXIniz3zk67rtmNQ6|Z8xXFohLfemm}tq9D7W=ML?FGFD`JTxd#Fhjld5e# z59PWjWiqA6m{es1~Ukt3$;2TQsTOdECMMa6GWFZO->cY ziG4#K?3?xz-RaM{415(eFEgF01g(n3xlP(zqJQ|rZcMFD>oX}&=>bJ5n|_|#Rr*R3 z-6?nXp0LZvV>Uh6=&|Y2Phnh>>?uenI^M+F|nF#2HuoKWLuP=sU-|`#Z-=A6iXK zN`i#a@MC9Xef_`={3bGbNxv6qUjE17K9QzmzbHNZIRq;9=bJf%HI9PUht|6nDvRnb z3%;>br>cj`sB!rMa}c^u=CD{k^zcI7*ES2ql%!p0@8$`_ilo2EUZ^ejHQ`zhLN(g>qo7DxB?l!2<>5kH& zqE^N~=g(p_jm~B!8Z|xVul8betUf2IFw-t<#@=s2sH>+y6C~(=4hY|J9$?1*8}5*4 z?1azns@VYZ4^$KMUvc04pMT0`a$0)AU+WP4zm)Xv3z7DiZxI5Wz+Vk?JlJ$27o>=I z8avVqNJ{u=c$x+xfI7%BF@t)js|acK>*6_^;EVxl#o>caf=iQp#%xRUA z-!k_2>N@Gk$(r4vD@3VK$@kNvSTr=^%>`+p&gA47Jli8m5#b!y*QwvOK5%Dh_@O&< zYdc7yPpaX-g38-6IcSAgi1D(-s4Hb6D+9Rpo0YGm6C9PgQA`O(QA{O=CngL@XPG1A zM~Aiy@_d!H!{nid`VXl0i*cN$$x?_XQ=h*>B4WJZu6_6$rf`IO4o2^Il70LKn40}3 zOvlt^tqH}jveUA3&zI$X_tHyBCcri&Qp692=`yH?Z(|Hd6(+Y@!hHSuN!v?T)4F}? 
ztW_%aYcd3+02abHJVM}RZxOagcxiK2|*Oys!9H0z2Gx zoe%pRP3KVuejFhoatqSpW58^k8nuN=&mWmHy2Qdy2NBErd3l6$=V!>a*-^T0_+tTg zzWRK(b*9z4X2|L+ZptNxc?;{xaU1hX6Fx#DPdC}Ig*g-6gZ6MABE08VM@AaM_qqVK z&>us>qg9)vVYN?oguXaVt!Z$ucB_OQYIN7|sC7H8TAs!yz=2!&bhok?eiGtZ@>JY} z+s;Rgdmh=^%XKJ~RqdPsMZV!2&#{v?OQ!SF^sKq6MWlRODbxCda54wux9W5{tnHvf z=vwsEb9?mDOt~(U+j5mmO}m)@F1_|`L~(Z*gI5||hq)%mT%H5)ySQwcmQ-rWC}<4F z>al#w{H;eDXH;|J`Qgb5U`C7;G0~Q6llb6BTg_LMBfL_tUhoZJR}ABk-^~w;r9JCH zuc~(_>xt8d;j`w=Sd(;3GE|2S*kRq!XJ=tLvIOZE^CC>8&MP4*GU?9NFrMQ?%~vbVAcIT=_dkJh6q+cF^lXT%UdhI`XczikB4#=d%KKdwm*l2=@4FfJoY5sjl0@d`Q>W%2Cv>-Z za7V4lbSoq=Fxkd;OdXc`ItPw3*MI9=>@x5+e^c(Egv%xo0CM@lqfG8$?oJ84-ZJUk z>#_fs&S3m$Sc#{g=rk9PR9^@OsTU%_H*!eTe3)XSmBYIj!X`77J}3+L%~Jjrwz^be}u zvc5m@myKd+@RSKDzRcT`RA1<$(L$)|?DLJN{iv`R){YViHtqR#>0~**AwJ}2tp)Kkh}Xj-LKK9?1BC;-!tTUy ze4tTMj@ZIPj~Les*Sz#8%6cfEx4zttq^qS2Z1W92P{7|a<;9j;Iy9Dvl?EiPsN%>} z8IKcj0X@v#vV0)RiQ0ppEJAYQw8K%nj{M4qSF&;iP62Fg|8QW?+tK?roGkfp&6rC= z36N|;s2`zJ@~B8q=~zu#A>mMeA)V60DwQP$2t({-F#sg_lx+%=m%nD&K4sLr zSnZ2fV;Gz!-*k;}BGJZe?nIf)YPFngGUhb6^Ld)w{LOD>W>DtT_iha${)u18{6EcH z|Kd$&|ED#`%+Ze3PfyuSS650*(;EH1d(LR_-0yb`hj%{SAYOHs`+r82QU~wJ`d`tT zwkTZgp@p-36JtT88SF(#iZqy#9tl9`7d202`rMs}0Ux4^SAIqPwFZje9*zO;JID<2 zODZJ<18Knae*7p@=`%rTMzgT(cgyWnlsS{Ae0pjje1UApwH&}IJ5pz zm4TtxL!WP-NZzM)sA15i0k7W5~ ze{c5rw1GdD?csw8ZcpWr#pB=LVT7K~(|EUf1pmb9S^g(@#?P1NC zGeqaJM$+F;RuIEz)%M568f*DfZkpZ^u;Oz_X14p}{AW%6*VCHY##)*FO~=voX)DN_ zWyEZ>(WTyoGg(FAL*esFSgi-}MfP~!E+J#2toj#pN$4Nn4KLrJN`0lgF6d zr4_k@k7yTmSr?aIj6C7-G}F-y=$`m_zk86=NL|)`arBnAG;(ARcv)Tdc|s-193tYC z0kLo5?#FdLrHgsUWY3R1t}rGS|1h6V@0cy;E5!XszS%$BZT`sIABx~?cqQh{A9H=} z#Lf&D9WoaV7E=LGfldDCYU%`jCd1JsKX*7SLnW&!YbB{z07KT?a43m@Wp1(4NICy4x{7}lJ?kfQieBhs)Xxl4)Vq=>ei@CB65SnP?IrEWnT z4%Iq<0JYsNC@G@-Om#(d+el8UpF9N0U7o{Mw3-z*?$LZvxVPkGNuCKF%ZK#o z_nlF*4B-&F01utC?1^&vbw2!03kcjSY*ox7PU15SlnDKrB)I_ryJyrN>XHI>pv(%ZF67<> ze)Em(_)1et!jevHEMM~2hm0srroqB)q+@!r1nDJCcdUXb(MO_+AG*>XLViI-6HhRb zDeJ&WTwjlD$|%p-e4slEg1E~>&!Jv__E_v&B63j;Y@RI&p^}gP2-n`E=h5kT|A@Nv 
z$BHzi=@t6l7v(or`n;kaARrh}{{c(4`p@&#m=&P}*85(7<_{{qcpIKtVm`I3&o&nW zzgO79ek@jn%Nh-ZIUs$g2ybu{GyM^#)>n+>O?NYy2rK2u3bWjBL>il{w<_v|&?k^6 zl9!S#Vj+bpi5L_cv68l?>0#po48r0Ux%Pbm?T=;rS#S4H{)Hvy!K}(IrHC-Z@q4*d ziymY)qeU=zx9jRqvT?2Ydc3jhhH=L73tHLPl{<$xQc9e9xsJ?nr`AymSSMD~o=Mm)a$)6SA_$%=bT%w2u1s<&*Kz+^x)LO9 zSlNqn#{Q<3txSjua(btI+kKFefE@$(_7)9ysaBymN@ zUZK|fRo~ndV^Mo?V*Jq<=q_2gPIYl-NJ5_zU0l~AkagDwGOO=PgQROnebH^;$lCn+ z))Bu)c}3fSv)x9TFNDRDJaPUPU~m2J=dMrpA7@&8T600`It=u~@k&~Z6Z;$l#A33E zb2~TAF9pBZqk$(lC;9<`y9u@OBuQ~B-=E1(P!;hm3O=I+fq^H4Ne+whnsH>&F!aQz z1?D%%0^$i94@pS4WN1fc6fYv5b5$naGjaqU$`>X^QNF-cc*A@i@Iot@4*Jfds>WCp zDWbHz-}+rS$y-gTqbPCMqcOC)%(6R8m2m+PF-vXwKcd@9-3)S29CSeR%2)I~!gW^yWcZZd*LNS+SX`Eh|^ zdpm2=Ux$qbNPeN$)SCTlDAu+pCw8d@S8&IH>|=VCeQ8%$y~`oNYggH~O&YZksAFu5;%=ZGJjVC<(7w~zPKo_5I{ z=W$3_$EeJ}g%HW~@ieK+ou!@Eab-%^A`nlp zx96K)j}0w(><5S^%I{~j=jn9t%V^6sMem2c=EI#fdcrsGs&iAtO}dt_DSQg?C6bx6 z#TEFNpB@PA4`sFJIP;46?pN!zVz%B>k*=jwciFBqn7ngT$E7~XK5&{u;){85 z7=v0E8}h4@z>J*_r5H_Fsh$X&jBr^YJ$g8s3-;-~1NJ7J&F}Cw9Xp=S?fg8O-#eSz zah*?EG5s2{b9Hr-ZJe_cqQ(a%w5yjtA zG|LOJis;XSCR-^LzPEjK-vj**}UnbOO}zr;4#gg#)8 zleaX9VVLjC(g2)BWiII-_mvg0-`f#tvES#S2nG-3{SyEa{!EwvLqCl6OljQV9DkBG z3cNcpobFJe2Kasr+FKZA@RmaA6_m3xD~1ltwpi+H!ON!d&4qDO7hqCT`H7P~x9BV_ zS@ZW}wkb5}2uO3MwsF|6j<_yffLPBmg%vuru- z33HgQ^~=#9YQ6b(#xx}^%ipPk$I_55_}q&_?9(%C#^X%mxB+4VT|gIEzhvlfPu{1U%l|BDgKA}pZF~g zejm$qz*c~BZgnJnkLg3n1Z;a)b?W;}_R2^9IH>%P;GT&+H^R44J0ClTgW>y{Hyb}( zJEg+bQJi@C*27f4UwLd(wDDs@Xk9Au1?k_XW)T><_J8aQwEj<1^S_oM|IiuOPWnI2 z!0!JYw#PLHwAtUc$5jrgONvB}jJVKrGl%}aI0MOzU+NtHM&SvBBpi0$mwqe%#7}MW z-*Hb&$jKyDN=zq+*LsheNK1*=>VEeI-u%sF-5xVi!pZZAWp0*iy^EwYYKaL5QyQ7{ zP4V+{Ymh+`{(>*&{)9MSH0A*x4-a1_m%lCasIx7$A5LhiF;u_4(l=T#-Ze4W?>?6vcYEY{ z2f>C72a663i!*+l?2VB zMtETNDtvOR*XJ~QwFUP+`tjioj_oUcu!*cr%g{KhA2l$AfG)QAE3(4C|NHW>)lcZZ zsSX#Nh1?h|1Vpa!KVVa~|4DUbGt^_L7F>#djkXubowKn9;zPh>4GO!ds}rJ;Aq!6; zDTRh(H@1EMY9H8#2-N`b^;FQ@?fJ(2ykx-3&a3^;y4QKfA76PKk-ShJwA(s#0S@J? 
zBu#Glg_ntkS0@gmvy3)D4|d;!gbBgt5PhHZhNJ3q-`5!Z_NU7beW=GDhB@P2;iu}Hsw+St<^ z_K2>Y2|XW4Mf7+X+vvRPkD0gR$Gl#bin8;GA|0Q1-=kmGFSzsuXWSC_Jwk1e7F~AW zdNgJAbLVW`h7rgTTt6y2obdRvD($pQT>qwRf2I;zp`^^-J{=?Q>yTI&jxVzg650?m%OC0SP)FM}G(&M|w zlEdkP)-+b_i>2(FQN0+6;cUQe3b@J((Sz+vf}k{!?9>QEKSU)R+fHA}W?xxcQ{`Eazm`Wsp`iEjBYvD<8e71gIyaWhZ&4rX6W&<}q*xFd~(+&(zl` zsp6bhDE)P7?q6+jRgh*mdtHTUQ8`0|mTGZ%U8(BcU)3aaRn6IQMAam*9}K1C0L%9W zhV#`_%gVQ3=Kl3)L3tgZgF)(`QQSy4?J6o<2a5{R6fE9Imuo*oESs^gPmY zs+dJ?JaL(v8o3X?67vtqt9d8w&O2_*w9ktc#7T&y7$r6xbNVuAmUFQamrJX<6q~uE zm5<+4GON#}WzTu2W*uqj=9O~@R3Me}&eXRmshWAJsh9nhnfPs~svD_XP+kw1!@E+h zrdR!>Ih)R|njrRrUj46Xd-+iQyh>?0z*1It&P36QK~)x~u~_P_YWcsa;&UdNp1N4f z*s4fZo-L(!Gx0%{lKhM?0^MGZ+!6d8#R8hvG}!Uo-!uK>qc`ZUZe^y9^inSEtjbKv zYa^xcb3eWsR;w)&o>wPCdPg+{M|6E3oHa^kcuiCP($ku6`eLA{_|)`>n|0jiw-qG* zT;7;%dZ0U0j{Ll&Hq@ouPOV!ug!F8_q)vBAN3(q^?5J(5NBA}uTdQws=7v6zbHX70 zT7>ot#`MRPwti$LB8Wx^2SuZACtPxMt#4gW$u-OicLs>uv;#Pzv>DZ}u0zH(#GE{W%MX z2ER8`jsA1NCk3Ij@+sNlhSBXtr4E(Xq@$uePYXSkACuBZo)$%VT16>p$3>sC(g64- zQDh^~n)rkHpXZ_LuwqsH0T6|MtoEk$iyEcWlY$vKRF+Gf6)du+xr!qXVxS0tDGay z^xCcrf|)c>9sGd7oM>MgC{3-y>|qvGyjNr<-IgrJB&+pU-vBxqo{lZTl_s2LD1?)I ziW{etm3WM5AUI;5HYjZRsy&4`En67(6*caTJWDh^_y)-Wdz!jXxcy#&_Az5Q<`qd@ zs#y!%h*6bdUl5o&rPY>abw7UYh2LmS+z?lYOUo#0Fl1cbgv~E&3 zM3*o{V=U939?5}k$39`w`Ea~%G;1`yJz+GqJ!~|%-DlLZ-DT9d-KLea_yhPOwer3~ zqoTPSuiU+Cu@qRU#&kzMPF_szORh%FOb$!FPWF>5I(IG`Nx$Ln;Z`G#xUp(d#rmGLPvu97lHST_(Ab zSwmzA-87^ey(XnYtO;8*lro^SQLZ#&_63u$AwRRSA@+nl8b%ql^h$P3o4S6JA47um zTc0n8-nx7fplRS_QPPOGc#Is^cfSvbCAiS!)U2BX42dVm(qzc~Ott>B1ah3)O&=1f zTQdn6l1tE~X_LK6J^9pP;<&UMzNA2QpIAUMAQPRD$;xBlIK11CYvnkl1>Zl!27 zHJ;xg1{V>HlLg?_cCJ0Nywq;hbZ$B{R5W87G^*)Ci^#+20R(HuP{NS1gN+LNToMRr zxMgkAR=HY?9T#@9ht8YruDW*fhA0z2IAcs&8VwERrc=xL^frKfZ9t8NdF`N4Z6964 z6pk^VU+Y<52hguAQQ1z-6)_>ydv-O zg%_8m_0L74+EJsHKE#M5oC<)Nc4h57v=!4&E$jM4+u9zZl0N2$0G#)1vf9raUVuBP zhZW?X_(mMFpwuz@40JBy(}9oQ)u6iH3KL|_GMKq^ZCaO{n~n@;W~iWIj~HrC4)4UJ_<++3$N zyo)z=M>aD9#nrMxEu~W%Apfz?8N3;nT$t80%QAIKw)OM2Xmrn2cLgW}$JX;J)-_A! 
z9DShjG0u!5E)Hvu6pz*H%4l7h4aWeeYK%XFm@C7YYsI?usIYbplriR=@qr7;24@NI zQ`4w^7PMq6A!?PESKlv}vB$+|t+rfQr)g8)4@wy`%%Bu$>sQR+;4-rrU(u=Ip>eTw z0`C`&(O*JE&2Sx9yRMwnE?L!2fPRfZW(07B^wM+k0-c@B&nCfnMWT!$S|P3|r^FT1=f*_M^viGrYL|6p0Xd zc$UY84?l-YF2PSPsGiX`|jlEUTe?CkPMV2 zB3r&yhvs8@`EZGXBx$P@F6J>^t>y-2o*n!7t;`+|dZs8Oa9j^{$Owu$Q5CO_lg3`n zvY~81cV0xlR)`DAF)&*cEuAY8tFvJO;p#YO$=fH7&@7-CLB5j0FSLMvJ?Kr!aI^vLm$xN#& z*OG}t&^{0qT*=cgWxg{tUoc;gCE*o=LP9LS>*0KNNI@$3%^mp`Z=Y?|IB?)=j3P;a zK)-ur65*%c{IDL#cX-=VV+Cyzs4>|j9sHhN){}=jQws$-5`Hm|B+vq9Evs%Gn^!fE zTNmsjMLf-%*XCza;DRCv#+Z4M2!0$dz@7 zcgzP8B!PY}nTymzwjtxX0j!uZJ<;wwJ>&EN=NNnv4uND3K+4k4pFfM+wW|9?wcW-A z0}L^9Bv$-f9@clwdk!l*(W6>BRaDbEodeKiBsq72#NND*MAyJqjcrzmoxlOf7-fpfNV?%NeqAb1Lq%$t(j>%An;xKI;^Xs-I=A52zm9Kc|jZR z<;U4a=WETpjXIS+yrpKu1feTtt(FVc>b&pAfSeoheNC|)TW zH%#E=)C%r+_dm&8Xl~U{nHOQ-b%X4MmdIT?aY-f~ZwmpqI!lfVO~^@HydVmZ=+ zDLz$S$Ka5W@JMRZjK6dZ`YS>fK=;E$e#FCN70k8z+?1QQStHksa)>_3k*ogs++PsV z2m0~D4Jsa)zIHQ8DH0wQE70I;SEPR$tKd|7-Rwz4V(-@Dgi5#L90&U zo@iI5e+%R_)IJ;lMFXP-VBpb(*dk_Z+Ewm92WeHP2Q~kp5_J)+hRr~E(%#-ZX^0>L z`X`L_$3>hP0RxGyaerONHt4&-(NA}mxkQ?|T{AtEQm#-PIKeyh;D-*RG>q>qq${Xm6e1yeL zW%7BjE4DS3kDv+q3}*FXGp@aUCfTN8SAjo>d`$GrvH{)#f11I>bucrwh5TA%KLh}; ziR~zFtT}iUyG2eZA|4!xh)>K;V`4qnT(h-_=BRJ%Js21pMxG#&90>TBM#N5SVma6l z3r8?hgdqY7o(&mA)I_giHBlVgj!hy*{S+ON3D1M?$gpcP7!=z>E+G;fOoT*;h!Vqj z7eWNjgXc)Mt26izJ4bFMG9O%tWKmo%Xd*h;A1h0qA^J1W`r{H#oq&nNplMVV`FmE) zkQ0O@!a9~+=fSktdXXIe%@^(c8G1~xLh1VDBvJ#vmchuSH$Aiw z{g`52;@u9164up3q>|Q>85#EGhxVaMQzVN6A~Z-Gl#RuDb3?n)MJa;Ckt6U(*eHxl zdP_sK>++-VFxj|_bb9YY5zsLy@5D}G7V$n|5!qTpgdY-5(dJjrhRgj5eMN_+fEAw=lL=2HVxu;)>}?pp2ubFh zp#*&yjVz5x#b)C(lIvX!^-!8)`{ArL&=B^{5BZD5WJXds(9I~v#WGXs7Fy686b-_f zi5#f6Eqg!ZeTEM|BKz~3C=`h2K(}q*8y8xDK0pyIkr@hH^0yDQsBmJ~cK))Gx}56D zmSx1-zlw%dI7?~iVjE%lWi_f9*Fk7oxL1@7i~cE<+%aN&GS!f+$5?zIG7O)Tg92nS zTAKRm@IF0vjQgj4V{>5(FoGS3jKasA;VG7uQ6I{bYf3$16EO1ZzX?Ago}n$allhhU zKoK$iJvE3e!U$&o5T;2`4*+?LhNmW7?u3fcW~s8Z8Q=AuL@nXg3+=m)hNb>sTlqqg zb^1Pb*Zz}mP2zeQkj-do>Ma}L7yhg$whSY#0qd|dJPvx0<7i@P8QaVkEpld%_~>Zr 
zW2z=wz43nUXjB@m6Q?Z@NaZ}kgX09;|N5RTJb*2v5CY?oxLc~U-E$|M3 z7=xF_S?Vxzk~+@{Y!UT}08OPL@+Xi4K>+hN##!%bRLg>aJ+Qz*XkU1=Cso486=4ih zi>g7!JjtGYzp}q9b!OY=N^oCi^dS|A4cC~tPbZm?K;5XSUnio4IU3!GV_$95#*jA8 z?dn~X^$O&{U4i%r9GJf`LDZvisR^vyhI-%+Q4a`67BX8d)AMj+5y9y8w$w z8N4J`UPBphf1oTv2IfyHYuQDbbJM*PRxU#w@O>Zx0wyL6wTxV9Jgcg>rHphcvw5Mj z^ij4W?UpULBk&0UH9q)zkD4Y7Ek8z;@^Xpu*P~QN-YpsMQlJyNX4G}OuVhUj>y{%p zDX;=z0i%j`_3I*Y#crTBLQ@}_5^ssKq(#;=O_7b1%_DHTMzlav>N*jJo=I7&=tOog zrABD$Gg!FHFG>_41T&h7NEVoE&%YIT9|AN54<_mo6ex+8L`vdkurr$45633v(f23? zmz>Pw#Z6NdnMqYAuG9M{Jr>`*XWL~bvjGD6yGf$H(Yw$GC>0mU$fw1z^O=IAe1?IF z64{N)G=(xUX^HGSrc%SRi2?K>N+=~KGr)L9epA)q--(~-t(51BD&?1A>)3qWy58J~ zUK`;X8GdM0JnZ|`5WFfpP5DL@&<~g-v&SU9(8DRY7M>&mq)GNvjAPuzhxZei=(Ux9 z7Fo-uC9rdwW)G6>r`3^}8V=_t_R&i#B^LvvHM8qPzD#SRIf@iwWbH0k_<5-J(5<~N z6%I@WCJfzLs;U;KS6H8` zqz${u?5X~gFVC%8Q&eSG!K7|4wZ?t#z7u@!-YLti^O1Y+Ub`WE78>8kLm%Lbw#u9Y zmQCrFgviNf@$fpi>>u$Dd290j2Fz*})@zuBjVom_@oKx!9P#&=WYO~~yA%&ON}2_a z%Vc?GY4X;u7zDRmWxLW!JN?6^S|`4w{B`~Fb#IvD}j&Qj<*IxnS*_)+9MegTJ? 
zS?0KV76LD(3#S%g1Dm;(f>n*KS;)9tmM(9b%iZDO$=p&&y*>%73(9YzX`V`_F9)TS zY_|hH2u-{QkfVFNKj<8X&I;j;ahCi2j8Nh%cbqxJQ{g0c(9UmhtTX$5He(B<@P)3_ zqGgTjm2J(aK}+|0mL=E)xtsnudvX_umrRC$n1F5ti4@^rQNInTdw!78`Af&KJmVz)&A2dvr0aeSCe?q&n~sm=Iq+W-Mrua9^5C)_}T*n`|| z@_-FK%3BFqzHIXoSl;>cj=?gje2^6pOo$FSVUwgju}%{KM}n^oNc0qi6M{1_z) zoazk<4Jr*v4QdUF4XTOCjLCWC)rYxwg?N>C`FKTmKMun`Gj7$kigb)!s zy*pKMrsyROh;$vD$EKJDVEe@TX!<~XIDJC3sb3HhLW|YF zTwp1%5LgM!2bKeif$y0{?lTj@mBVMkIl}A1ox*Fw9l|fe5z61)M~M2Yifoc>nrvch zs%)}sx@@9s%8J$ZZ1q_6*iKkZ*wM zMm7Q8HcKj%D{vv@ms`{#D_F6RK4D8dmiUQ0DFP4vq|HoB2# zes3|=SULI@EEyffL$jBfxigU(Z)_QD!Bc#Y8fOfOek7`Q935w)9e5fYV`CY3Ch~E* z8JQ9q@F#+Hn%%=ov;v}e2MC$jN0ajeziwDoaSm6NOZk=9X8Ej3GiW8Xd;VE-8*tmu zQ7!WeV7%@`(G?&f2pfFmh>_m4dBUFB- z%gEk(j0s78mdo$G5g06z&m!A%Q1)%t4gI= zYRfXH5phHLz7xzFWQFw#Eu^4PU8A^#6Qqr8tK6)+MT>|^_pIWB7qK~V-vSmzTwu^C zvF`xG1`%TUmf9D9W3Zz8Wf8dvTCk{$ z{P`E9U}MCXcORcdG%Z%Pio4X7MUWX*wyL|_mPZgSmY&L(EOCMwk*h*W(l)-b(yHgV zOkNAVUDoP;@CGn%Y|Q%wZ!D7EB@rY}dha$CY4fZ;Hmcl#ErKBV#kd%0T*2PY1mdMK@gBhZMia?)AEZlXXG{(?&03IIW~F!9 zCa>5P_XsBPm>tf2Njl<;L^q|uFCJkfWs_+#^hD;EW1T@x;E<^{yb7-UsC@5_Z^scG z2NYN0jL(m27T+caGo#9myBFVXq86L@Nj$&^>x#yccW_{{^5>W&NutpDH8BdM|#uw3jQpwzn@F|8ajP!_%2Jz2EhNNFAaMYI)X0=Y0{Z&}tWr;T#MmiRl z;&GJM&a#ps3$>4NMr9S(&TzV>_sh*JS`hHb8kbm{+~D*1^vmVc&Y1mMm5}eHU+!1! zjNQr2|5ZiCv1$U9$h;bY#telW@#an)@)bl0=%Fc0aQ zWWIB6q2qQrY%~@t`=xa~lBZ6e@*-S~f3K%;}FY#C9DA{-&VE(v5|8sy+I}6&fj^c@CH~k zKrbM7eqr%Ia+8jhC zt?s2>Tr^}Y+y7}vF`+0OuwddRi!!%sFFPISYTEd#@-AiWwQgBo?P5ZmM1QijZ79l;k4L!cCq2Yb6p%aYLWD^l! z+SEKcq@#bf3Bs?5;fYD~_ zPgesC@Pz~Hxc@Cea2B56P*5PpFe4I(nRidpKZ2BnCpn2M$o0e&+BMy#250 z&N`^BCi(l2KyY^p?ry;e?i$=R5Zs-h!QC~uyE_Dz;10ndxVr>-Z=QX2e<52H*sb@U z1rBwKD*AJ$=bo7}Gu{0yBfMMyB>uauc%aCMn;fF_1-6%umaEvHh771OS?KGx-Ik|! 
zgnqLqrNus`1$(`xPIwYwe$a<@dKzYWN`bhBb-GY2;=X4Nb7%Oq7jTfN=jDO^M2o>{JP!$y#)+GXuRg zVp0d(Q$Lf}^K!2bL2$(y@*{$OIFib$d@Ngnf|TKBq;KD&FKpiB+Nkw8)v=$l6;SE?so1?s^xGNVz3ij@2GYIbDW)X*6&Uwcd8Sfh4Xsd~iR;d~ZTa z8+&!Mk^v6(#yS35uO1{zHlj5do`E`FYlT2=CsP=O`00`IxK2X%5jWsh`}C6On@wP>$v?=be1XdOj= zToGrBUI+W==1F8`xpSs6W#@H5jG{cP{HcN=US1sv#{lzPg7}1H zZ%4o>u?t|l#- zCTn|2gZ?lqa4DBskK=k_mOZdGUjhM1LO&x&(Be-)f`f|EACQ&M-46$o-Zst}M@kwq z3r(MA5U`s}CyI1si^_q>DXE_kV03@3Ww2#HGBDuxLMHHkO|_ps;=L~#&etX$?1c*!R zA}#JEPumxZGBN?NFUnJ;SyFPem_E4oY+D^U`r*Po z(`cdpu;uBUQM+?!9^!;`+j*R*#pKF|`H8+d34bz!#ysOLL_aE+&Ur-LYlp5VZ3q2z zVe$wH_>7j$#ylGk@&=~IDwjXZxRpHN(KolKUZFZx;j+;h>cMzYH*FW1mK7(d)b(l$ z)=!DoIzz|vNuv@Fe5vog@^Zg2Y~8?LC+auh^EdF2pV&G(%4c*YzNhXGV(ODTi_zX! zc5mVvKffhPBCCGU01Z?kp5}?s)V-1kI`3Vwo$Z)2!F17P*H9e-l~)Mq#gljq{UPy_ zBC)6!Vv&Gn-}(qej*CA*glWK-svJ4c+R#0M036e+UY2kvdH)nfm<2KZwyhW;oS?>l znSS)*?|On?*>wUAP`7kJD9aEjmz8wSIo;?ljp9^`lY+Pcc|g?a(eJxjeUa>9Vs8}2 zjiN#~eP1fTECh!6_&aP~L>vlQ?>deXF%>VyGx##JmPSRSg`T2stEcJa*}qv^vI6BN z?kJkd3jk%KF?$(^3N{D?u}fw@sU(R?K9iGCiEo&6Dq0eo zGM>Tv$jc-G^i_Gb-DpF0t0BhKM>1VEhqD`UUzPb871npPQ|+;$a!QyJ60c+;6%kI$ zMFSbLcT{8LUBbyfrjUP(As#H%7lm>l+)miuSYUL;ccz} zPA9#F6Ou_~Dmr)T{^fIUA`6^GbQzR+KNN#a%;x84YN>uA^TMjMvG$HkyH#Q3TTW@& zb_2%@l#fgY2S6<1(uz46)kx|i=GA+kGZ=nY0$jN1=Yhg=`ZN7D&S4@c1=i{%MOr3UXwkwRW0?t~ijY64-p{m7Q5Q`u6UI$OEQwWt-yf)2OCY86W2%o}{Pn-z@>ZU+B`5 zm2W)}k4ax5(Yk<==8Bkyn=-1ObLrka&-1CP5m>T0vz|FZu73lsiR%7vb{_W8;fY}PQF@W-~keoh0C_G*V{nm@R0ja)3n7PK6My_#&EXc z!c`9vE%MN2Q-M-EKvCen9Fqsusr2Cm`WB+9E{na(b&O39$y!5FL+@6?UVX`rBWN#JU)mTyT+FvcG4BtD$mcvn6M8-r zL^LB}%}jw4!})$<>+7V1ZTTA{IPbn#jX2cA8w0vVWGo;G$b;CQk}e6V<5pfqJ^1#b-huJ zBK!)H?}CcJlRM*-WJD`wsQa?i?S{j;o8Glp$kOp^Z?gDizw>sLc}}II?YdV7xdIU$ z!EHMR>@*oZED1Z^aw`IuW-BaW6u1bunGXEgCIYMF7=NoA>4(VhI@8Q!|Cf5O$y()j zXiU_273*hAaqT`5-asC}6g%L2cKK3$EIZMhU5aW8_jeT!LudnEG(vm27nuTs=5A0C zXf_)L>ZvA=VTvto`>$$FDZ_iWulFQ$+8W4ZdL}5d#f0kPM`u?8adf|`w-m?)QKsPE zi8g;YUS*-Fp(k=Io2rRKHac6})tW1|vWh~L&YVM*F& zy`#~r{1pQ<5m`p3MXk|(Vor`BNS-2{?XrSd=|pG+62@>8ncm;!jWxAOrYl>iTA=eq 
zMaK6Eo$uLP#X1z9hx@;;YY}T1dlu(FZVNVN-NLDC+#B3sI@-p6th~8esmzg`sGK@} zN1(y*c})4?TvY`)SpQQt2WiQ}xdX1mm$&-4(<7tHG9oybu&@f&UXkr<(b`KbuxCxw z$e0S`o`MN9G+C#_cQ3yrGc)+{MQpK33}&nlrmIxF4bYq>5GBG*8!tnZ${3ZoqD1x| z%7Ca;ZV9P^V0FTR2@{O^AdorUI)gz(gn^@56 zDg>GY)NXUk(BoU#!;q7JBSuw=3GZUcQ|*22nKeHPvA_Xa$IwD zv2Sr+TkT+L-#mu1#WpfG?QRJKY$j=czMSY#8+#~eexVY=Z&s9RjbY;_qxO2=%rv=6 z5A0Mi&G0oY#7elBve2qb{HKh}0y?4Nax&Pk4%Vr5fuIowYRt4mmfL(ZJ$jzk6hT~d zCrCv-Ii7;Y$}(GZ5L1^?4m|ebdgmxHuUjdA54;qL%cby@z;V9{hC&4LR6>Ajg1*l= z;aw5=M2Lhv3J&v%jHL)WH!%Q`NE=p|TKo-7A%$h9 zO&@Pombc?DV<*yOdK&0*6O07ZvJH4dmWTcu!5L(|@h04w;5IcpRw=U=YHwc^pnTEJ zfP$+#LO^rlRRk2;DW7wQ%$g;qx0)z!Q3Fz(lj5+?IGb_s)j*)8I#nw>WE{cBN|3Z@ zr228>yL^h^X*QJ+=!Gld7l7kH>QPm$b&B|cN`J2noz*x|@U*L~4`|O?kMu{k z7Y8HCZivLUWeb{l%H}G(;4RaF#jUXvIxY}ztu~i42bZh&@AtFg97Kp&7@bhQC`2ve zx^X?=K7LL$DVzQb4=58FuKA3S)cQ{&>AqUCIgSFnra27^N?EPwyp2}ON zAG>i{cSk=?`*-^fY20r1FAjEY;^qullpNw%a;3+&9Uo3^2B)=}T5e94dMw;-Zzr`< z2bCUNk3Zaezd8#uVt?SqzUN4q(pp|B>f173|uU9kypS;~abZ zn~bjb_}NaYF6mj>xpwvLz48!w2MI}je}e;)KTeI36WZWc2|fp2xvAZBgi$FKHSJbW zY~~@$Gd)2ATrVj zzYSmKx^9bJwr0tv*zRP>(eOTC871Q7T)@zn4d48#z(AF{*oC54S~$9ejy*f>%0yQz zk-Us7giH}L&kw>lJW*<~x;T%ulW^iG@LGerod_TDO8jMB(D@O9eP5DNRYfmSPNAuI z({tH!Wh9Iu@f-Zc;Vk=CctqMYQfd)FIx?!2@F}B_p|s#+g7N+4$Cc=v5vkT>xi!6d$nA!q?hqYn`U+t zxKkbl!&ydsFUgesixW^fh{q!|@nVp_m5B5q`Iny97>EW`qxHj*UwDoZ?TcvNqh;=j zQd?rdkaZ@vQ(Gg}5F5D?dsfhljXMTNwo+KZP%;OiIqU0$7>mhb_JA$Pi5X07m=;(T zAr!r9D}zQ9t@b!J4}li(3*=Z6PUv49sE|2WdsQsAL)Gcjd$7li_zEUEuM(I6O7e@cn%G^>DX9(o{-Ae zmXg{0!$j3oGO?<~z8ZRnC|s4`tmJbqG-Nbk^B8=i(Hlewj{SY~js$EU3Zjam$*lvI_*%LIS!jIF(X)cSyj(F|{lfy?vp+DPVDsL+n)L&%JcI z$@V&UqGw+i9l|1pb9C&xBemRU7L5{}#3Hm{3I6K(C%W=>kGOEsOI&7z0+cn7jyR@a zA)ybV;mL#W6dEsvSjw6Ov`%=VL3iTMYk^0!H!<@P5tR_pbg7t!rVi-J1YQn6 z5|dkPlD)70l47w|=!b3)MAny|cEz={bB`a$g>-T@6i$2orq5jTY%wKVQhtd^=_R%R z+qChJmIn31HKtVrPwwH7C0l?frIgV&$}2EQ)mX1|k>aTWR*o^l*)bz%Z;3THY|qm! 
zRv%7I!IP9Og%1X5__1b_S94sn1q%t3UKwJWX>#g5=fMyonn6Z^XlPq_JnXtggIdj!!oq4bALlU;E`Tl z^bps@DK+*)0z@!CfJ3QeF>Ytv0?R#!v1D@sg)V}LtIDCf`#FOb-8J(3z`KnVGX5wO znp(fMJgOBy!7muyL+~4tmT=-PV>^quqZ|wN0VAWlD(1;@gU~>=edFfx{8uaxRMx>HA73b01*tGAR5E61ZirjKz{1D z0X5EJ4$@Q=1N}6Z1BU_)oc{@PcnTUN7^q;t8)DHnu^Du5SZ1yMM|z?zPj?&-s1`ry zW!ftNH+q41LpFKt>z_3-W!`i|7BblV=A8(BZm(x&!P`FRoo8mjyF&h*1>gI>kOhya zoX7|$iJ7?dcS%fLvd>@tlPq{oql-?t-~1jRjjyr^C=9a#|BRuIZb80h;JhBTNPNr^yTFH8e=nN)A#Yj-THbWuCo3J ztA!Kju1As?5g)@lNuB6B!El-A5no^~!qcDNr6h2Se_pVMf{VADTRa_1NMJM^O19as zU6Rg@NRPvEYBWvi_WXLwmn}<^WqpqAwRvdZm$=U|72&~o0hLcGE#jqpybxHOYvcA-pCU~D0l#@6gz+ip^)$*A(M zk%@u4y@{ZaeBABHIr9|uwd9dwJ}<~HmTi;Axsp$^qlDQ$*SF`%EQfndFu#KfnCx2h zEAygntIN*GD6*|4ch8c%)~l-3;*LA%y=&BP1(OO0-C5-zstPex(lD32baZk4k+O3M zV~UQAxyV{!ROv2`S7#@GT*p}lr?cslj10{yzgf9R=TvxVc-bOW+pyyEj`*S5bh5^9 zf5(+tVPHBv&UThCsD%16b*DaOr;zwneOS4!1iIJ^od|1zZzd8C5|L?yf4kk>9bEt? zYT$I_Jn}$t9`Qg{z=$uD*&RSREg=YNY>ZzvXux1Ih8PC;PKqnb72`&yg!?0Z0wPU~ zJQ9Kci=t#DTFE889q{tDBP8-jCt|aNNW;M1;)dmf!gf>{vgRP~{k$g#6|CKXPb}1W zj*(yu{vDN>e@mPf_y#k#lBMwGV=M`S#hpA1BdBw}PhMIu2)CjI$+0t&{TxJv`rG&oP&9 z7a$$v{!m+Ak&R$I3&u6^Fb{0qgEsd5NBL=z)p>Z*{UlDJO#^#9J0uo~sfnW3aQSyr zhx6{$P;)`v17w^@Vxzux`#iY&^!8Vdm)|2G2a|VE2u~*5-TO7NH;D0EY?3A`$Ua|2eAC!Y2YKE=w*B3-`OX4b0M+UD=~Acs zft8vDH zeP+MR*ijX=?OB#__I#bRM{?});U}%7z}dX;9GEQlJfYoP!l2T=HlAi#!cE`fPZK*w z;ag2LWuIlnM$DaN_driw*vXmE~s_yPXC^-zPI`z=;t z<%`|-mMOM#mD6L0@+jN_Rnp_QSAN{X^DxV3ORsGROidj=42T7X_;su508@)2|M8pgQUu+C1r3oTVplE@^_0|`yn(Ah=DWX_3bd2fvgnt@S8X<#t&p)P>) zev1i`x$zp1RHr4oclY2C4l-oN6563t z6Qj9gX3FUd9HaLJ8+Nc3&mCCVbgG9ZLS=xE9vb}41Be)!;!4r|;dxJ^g&S?HVadvw z`{T`nHp20xXHVPqf0{o2lj1e5Ab(HW?f(Y?(qN3@8Ey?z2|?!@5*#Piw14~w|>UOA&r=z{qn?fdf0hx-J}Qqu7G5$T^PdGz)yZBcs#~ghJJaGiVw8^{9hJ(js19c#G696o6BmvixibGCM zdT0CZin{ROTI!Jrahlkg(JGOu$l=O;|6Wt~N1b-T8qD9S1Veqo7r(!^T={F^XWuc- zPrkS3E-+sFPmNUZ{}`zNcT9H0Un*3!@&AbmRmVL9^Y2;*=A5*!e6IkNa?yZ*2>z^2 z_7ANxx|1rVh?%=bnvxgVqGPspqj>)2=ddcqZ9$~A zATwZn0l;k zU1C=W6_%M{Q_7MB;=-LJqJnEOky%`CYSKjb!0OperidG-oegIdafBJ_VwZ}dDTr6= 
zXxKP&k3{MdxT>MP*j^ATg+u4_QHhvoFeEqonzrvguo)#U)vOmeA6zD!{$O!GMRI!+ z8{)Pcg^~I-KCdhct>#c`#!bJ2U};@kqYqxv1&{#`l~~oR^DWlsG}IV#e02IvI}2!q zJSBp9j2oS6L64TiyO?(|i?alIlB#rtS3#z9mrVg;P1Iy;rQMbiPU7%pjlP*XAHUmj zz6p-x&oO~|r#PjaC9$#v9!(m6C>k3t>MsCWs)p5P9h3GUP+ufNNcy{u1}MgQ`+10Y z_OaPrm$if%;--Qq(rA;>Zj;=Yq-n()M zX>y%9LxXN6t6NE5tXWRB{7iTf{MZW-xW!O>j+PG21e6=rSvk*3!-dGYz(gUNm05?_98 zIls?bj;wr`ZUW(`T^uSBEU}*uDVB3)-C7}u38RL}G$K3--s`<~A*&lCPUy;k=*B)B zWJi?Ap0a4lOKcM0?N&NBR;&j-&fpyUuq_h0AUbFq-RMfmDF+mBIVAXx2Y4xTXmdcq z(b$v5$hEpr7hD-)7l z(vpZu)~F>?g20;%>pl0@$C?jS=ay`;-{KMJeP;?5uWy^v=iD4Bq6RVK57P5~pd1}p zZTO=fTTVls?;glv;2@PXx+V>+abIBYD{zEnX7n7PA5E#PX)`*yjD@t-W^Iz%WZ~o1 zc)5PuJ@7$Ujyc!}9#$qkZckWQeig9~2RbiK!_U9jJ$SR|-RSerKvAmW} zn>>gk@dI39aYQYWF^Jp@M#gRM!LISR0yHu46Wg`Jxml}T!z^cw#s-||gp>+Td#Wfm zrG{0k<+-d*f6-#YF8KPIj+xV2h<lc}$nY6wbtocVF*J05 zQnoCWr3JH!=Oq&%gu|KuEROj7If=Ko^!}5$PDAYi7ziH9jq<$ddmV$ zL~M7kL9NobJx7bwer%^36qh91o{`rrnuc)A+<+mjCJaP=ckze4kgO6}XRpflM^&0% z;0tQdE4T@a;>NPfRBN*kn0q6d$%HTXICk#P#$Iz-Z^VToY|uc!qp?Z5`mlVK2&z$% zhM)0~6h4C}5)qNT9tCeoCOPu76UPIMF;zzT-o74WE=*#1v9%b5Pk4S63|<*~jVa}z z8u$$Xsp#4}XZQ!YTSi}j;;fq3RCD!=D^61wOR8}N^`g05Bxo=i8k?2s$X09@DIQli zq}`7R(R{K<)6kq4%F8gtcpge#Kq~e$Z3E8rgweXU5}e>{a`0S=e@%8t*h{@{=Gce0 zG&#!lRW1tscqO)Wc`J8==;5L9u-~xehtg&&iMcV6$lP%lY2&*nEL4cas z&W=XZ`H^6`=xs+w-t)4&ieX0HlS$)og5Jgv1GU5k9C)yf(8nss#DF{ugj82ob3Dl2 z3?DAtp*>!|yx{iD#{$ew&8UFd$bX)lY|M2mscqle*lN+!=w{@bw zhK3_TC*lOxiGlYz0*l{@3V>@d$>0HBoSTE43xykqzbcccnA_ezOm=Z`+q^xfuiPcQ zOWmMg&mV3p$1PnVtRY~V|4q^-1v#v1+G{;D5 zF<-^sWR1Lp%+Mp1kb?ihuNGNz)D{RTA&OMJAI0{yv4)J}`;GY@D_2VnT1$hc6QXJ` zX(Z`Ajx8#hnyX7^MA}}~0l9(J!uLF226GBf3DYI6WcfU*9x0<~39v=Ho1KxF^)z%& z^fPKSu|X5fBjj9z$JR2RR4dCaPNx=IZFF34fw5UUD31z&CV%K{+NYzX1{!2;@XP__ z%rT%q^(>b%Zmd&d(Hh6O%~?7b+||BKD~~AOqPz7&s%efdRSx#x|`Q%(om2UMm9g#k$nDZ-I!FV!-1`|bCfoO(F-O%( zNmd+>i3Q%)5|b#yhknv9*uG`-N$2GP(=131C)XTqpaK3cu8CFT@-B%)E9Ll|^?N3$ zxgml>NJWI2AS&)9T>)N%G(=iiQgkFK6hB}R zjd|s(LD@VJWD(~jl~7${V)U}tmf z;e|TmU86G+_O+_(*wxdxDBBW|87T<|vQd?+DKb>0c)o9cq>IEjD;R9gDI^1CH=aJ- 
z;c_O8b@0J*{>0zko6Xw%xwt~FIb;u(;EwyI;uTCt2I$$gDfiG3*eF;Mm&ix|k<*IQ zH(IZIIk)B>pMcu%MJ4r6zAZAp zG`KvLMHka3+dg;(%-57!*3{NK^bXE>a180~h8Pfm4#sOP5$_Tz1+mXrNltsd#B%v) zYU<1RH6_G&P&1M|cx}18Atn5&J4AHs=Aw6tn%iTs-DDlvv)1f`CxuLddF*OEejuse zl^nbLK(V`Z#wEBO&JlGww@f-@hQCpn3aRNX%PBM@Ef3$KF?}7LKV|*lK~3hH*s8{A zCPqXmFb^J>K|VVP(j8jKYCah~)|4_IfVM`|&9&)+y6J$XKPAPJ+!B-CU8jO>GkWos zw(-0m-lKyxbuGD+wg#t9Tw9$gFnh7jKBDIQt7W}2<>$f#g~UQ8_EB9?Dg;sJgs>9+ zOS@TG?iNAtW8L)jj|*2W)I&u|9QN;g*yEWBa_Qa!DM+G+R__{3r6^Yy(JSlp=FRWA!vy>>>s;dmitq9Z-O1hx)LSH> z+beKfa&tjLK{b(R*%8`YK98Z+T(x9_xZAF*PYzb2^l)>+)kQD5Fhkdp!lWk^+6$nP zDzO8!Y&mRy)ZLD9fFTgOIHu&R+EbC#@0y&We96~_#<3ViFepdD+oazZYK|$XU4S=! z_-#7@iI<`&8=*?U`^XHGuvrR$|Sc;J~q5i`=V{a!&5^hkA%!Uyp4 zd336%t?j`pRyYhEBoY!s_{vy=UtvMs10RFQPn4M}yHJN~7bN^FxnFsO84@NdbYx2+ z1PX%88p>Ej$HiV$N_{YSt14&MLq@c|mraX8roIYMAs!$mI~8%06Fv3q1uurIwyS04 zV!R#KQXvaw<>I`zLKzdREXXY$e7ryvOvU=*L-u!ka00b+PvWuY$h(|WzDVCPtk=a( z8uis0TC+(U^_77XNzGyUlQth~+1{pVb%zrPwn~4La#Vvu)Uk#Vl-<$nb;@$tp=z7V z5Y)bAUNJ(~!K;^Xw&{|(R_Ij8ZI2D){t_;Ru0~icxr8plyk8}RtyWLIV5r{tu5_2Q z5K=0S9a@YIPu}a8c0AZ(3ht&8?bCJSAyT8X2$ z#`~tjKDA4SDVZFPA^3h&^(((xm+RVffBxF#{^>c~W6x8tzQoG`+(GSt>tCM#51!S2 z?mGiSCgYT4k|VU*0TsJHWcPL56ylJa6tdCMvrJYKRo4+MmQ_+!=dm)rrR8Q6^kI9O zUP{eKOU%etedUK19-*3Ul4V$AcvG2OM^sKUNzFPjJIl;khtx|+5kT8FAgS zO_`_$`TaET^Md@_)XyO)%k%9&P5reo^IUs?fBM%K%%iuK1o-sgrLeMkzZ~@j#0oP+j4ay%Ip`}ItTn1)S>Ha@+5^x{2gBsAXKR61&cy>eH z3fUrc09^M|mhCY_67UiIGvwJVF=L6AF$Mr=0Tb_IZy8}c0bzKqw<72nhh70fE8Jjz zXHa52!C-tY0}A${&jjEsGr;w@9RYYxFqodpK%2gC?~wxnS_}G}A%Xt{gZa6PBe-~P zcR+v015!F3x5LEl3C5e}G6Dn7q5k>leazrt|fg<3jYKHpp^Z*k@vCS^{*p~ZD@Omy_<`_5 zI{^C8&tp7RNB)&zgY*Ogpq2bQ#$&bMUl~2fPcQ)5!Ovqn*7f|A;fe7C1EBKxJjP>r z%wHKPm`^YOVwullJl2@}mC=m#1OuQr`8>vBUASKvW`NuOkCQDxCGL5Q#{x&cGQdfm zU;yNfp2v7B=JPAVgY*doK-%YdjK``kzcL!hpI`tqV4lZ#oHYI`V_EhI1|Ws}d5p)I zrN1(86`x>OK9})0J@Z!vx6%^~K(glZ+VMDX=~sro`V$O58q@O_kAt>Bc{>tz*c!B{4`h6bbaV*%cj4r^U3-$-IQ^0HZ#zNH<2~M({gnse@dw_&?f5;9_IUU3 tSK6TG-?abI^5fmHUo8uJ|84o%9s&iNlL7(J0lvKufq*zZ0hVh({|^+noxlJ9 literal 0 HcmV?d00001 
From 4f70ee521f67cd7df3826b370899aae13dd47e25 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Wed, 3 Aug 2016 10:41:25 -0400 Subject: [PATCH 012/103] Migration Guide changes for BlobContainer (#19731) Adds a notice in the migration guide for removing two deleteBlobs and one writeBlob method from the BlobContainer interface. --- .../migration/migrate_5_0/java.asciidoc | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc index 7dcfaaa9de5..83a235a1838 100644 --- a/docs/reference/migration/migrate_5_0/java.asciidoc +++ b/docs/reference/migration/migrate_5_0/java.asciidoc @@ -377,3 +377,26 @@ in favor of using `addTokenFilter(String)`/`addTokenFilter(Map)` and `addCharFil The `setTokenFilters(String...)` and `setCharFilters(String...)` methods have been removed in favor of using `addTokenFilter(String)`/`addTokenFilter(Map)` and `addCharFilter(String)`/`addCharFilter(Map)` each filters + +==== BlobContainer Interface for Snapshot/Restore + +Some methods have been removed from the `BlobContainer` interface for Snapshot/Restore repositories. In particular, +the following three methods have been removed: + + 1. `deleteBlobs(Collection)` (use `deleteBlob(String)` instead) + 2. `deleteBlobsByPrefix(String)` (use `deleteBlob(String)` instead) + 3. `writeBlob(String, BytesReference)` (use `writeBlob(String, InputStream, long)` instead) + +The `deleteBlob` methods that took multiple blobs as arguments were deleted because no atomic guarantees can be made about either deleting all blobs or deleting none of them, and exception handling in such a situation is ambiguous and best left to the caller. Hence, all delete blob calls use the singular `deleteBlob(String)` method. 
+ +The extra `writeBlob` method offered no real advantage to the interface and all calls to `writeBlob(blobName, bytesRef)` can be replaced with: + +[source,java] +----- +try (InputStream stream = bytesRef.streamInput()) { + blobContainer.writeBlob(blobName, stream, bytesRef.length()); +} +----- + +For any custom implementation of the `BlobContainer` interface, these three methods must be removed. + From f97b1a94b83587cd80a9cb9b72488ae4fcd5ee50 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 3 Aug 2016 10:49:19 -0400 Subject: [PATCH 013/103] Add 2.3.5 to packaging tests list This should make the packaging tests happy again. --- qa/vagrant/versions | 1 + 1 file changed, 1 insertion(+) diff --git a/qa/vagrant/versions b/qa/vagrant/versions index 98cb5bf8195..dc73cb6e2a4 100644 --- a/qa/vagrant/versions +++ b/qa/vagrant/versions @@ -12,3 +12,4 @@ 2.3.2 2.3.3 2.3.4 +2.3.5 From 51bbe2c5c47d07d0913f533e882cdf6c4d3e0efc Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 3 Aug 2016 16:56:10 +0200 Subject: [PATCH 014/103] [TEST] fix log statement in ESIndexLevelReplicationTestCase --- .../index/replication/ESIndexLevelReplicationTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index b52c8fe9bdb..27eb753dfe5 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -316,7 +316,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { // OK! 
result = Store.MetadataSnapshot.EMPTY; } catch (IOException e) { - logger.warn("{} failed read store, treating as empty", e); + logger.warn("failed read store, treating as empty", e); result = Store.MetadataSnapshot.EMPTY; } return result; From ca8f666c66495c0307ee2c2f9b4d8c7b8237a629 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 2 Aug 2016 16:09:14 -0400 Subject: [PATCH 015/103] Add line number to yaml test failures Old: ``` > Throwable #1: java.lang.AssertionError: expected [2xx] status code but api [reindex] returned [400 Bad Request] [{"error":{"root_cause":[{"type":"parsing_exception","reason":"[reindex] failed to parse field [dest]","line":1,"col":25}],"type":"parsing_exception","reason":"[reindex] failed to parse field [dest]","line":1,"col":25,"caused_by":{"type":"illegal_argument_exception","reason":"[dest] unknown field [asdfadf], parser not found"}},"status":400}] > at __randomizedtesting.SeedInfo.seed([9325F8C5C6F227DD:1B71C71F680E4A25]:0) > at org.elasticsearch.test.rest.yaml.section.DoSection.execute(DoSection.java:119) > at org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.test(ESClientYamlSuiteTestCase.java:309) > at java.lang.Thread.run(Thread.java:745) ``` New: ``` > Throwable #1: java.lang.AssertionError: Failure at [reindex/10_basic:12]: expected [2xx] status code but api [reindex] returned [400 Bad Request] [{"error":{"root_cause":[{"type":"parsing_exception","reason":"[reindex] failed to parse field [dest]","line":1,"col":25}],"type":"parsing_exception","reason":"[reindex] failed to parse field [dest]","line":1,"col":25,"caused_by":{"type":"illegal_argument_exception","reason":"[dest] unknown field [asdfadf], parser not found"}},"status":400}] > at __randomizedtesting.SeedInfo.seed([444DEEAF47322306:CC19D175E9CE4EFE]:0) > at org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.executeSection(ESClientYamlSuiteTestCase.java:329) > at 
org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.test(ESClientYamlSuiteTestCase.java:309) > at java.lang.Thread.run(Thread.java:745) > Caused by: java.lang.AssertionError: expected [2xx] status code but api [reindex] returned [400 Bad Request] [{"error":{"root_cause":[{"type":"parsing_exception","reason":"[reindex] failed to parse field [dest]","line":1,"col":25}],"type":"parsing_exception","reason":"[reindex] failed to parse field [dest]","line":1,"col":25,"caused_by":{"type":"illegal_argument_exception","reason":"[dest] unknown field [asdfadf], parser not found"}},"status":400}] > at org.elasticsearch.test.rest.yaml.section.DoSection.execute(DoSection.java:119) > at org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.executeSection(ESClientYamlSuiteTestCase.java:325) > ... 37 more ``` Sorry for the longer stack trace, but I wanted to be sure I didn't throw anything away by accident. --- .../rest/yaml/ESClientYamlSuiteTestCase.java | 23 ++++++++++++++++--- .../rest/yaml/parser/DoSectionParser.java | 2 +- .../yaml/parser/GreaterThanEqualToParser.java | 4 +++- .../rest/yaml/parser/GreaterThanParser.java | 4 +++- .../test/rest/yaml/parser/IsFalseParser.java | 2 +- .../test/rest/yaml/parser/IsTrueParser.java | 2 +- .../test/rest/yaml/parser/LengthParser.java | 4 +++- .../yaml/parser/LessThanOrEqualToParser.java | 4 +++- .../test/rest/yaml/parser/LessThanParser.java | 4 +++- .../test/rest/yaml/parser/MatchParser.java | 4 +++- .../rest/yaml/parser/SetSectionParser.java | 2 +- .../test/rest/yaml/section/Assertion.java | 11 +++++++-- .../test/rest/yaml/section/DoSection.java | 11 +++++++++ .../rest/yaml/section/ExecutableSection.java | 5 ++++ .../yaml/section/GreaterThanAssertion.java | 5 ++-- .../section/GreaterThanEqualToAssertion.java | 5 ++-- .../rest/yaml/section/IsFalseAssertion.java | 5 ++-- .../rest/yaml/section/IsTrueAssertion.java | 5 ++-- .../rest/yaml/section/LengthAssertion.java | 5 ++-- .../rest/yaml/section/LessThanAssertion.java | 5 ++-- 
.../section/LessThanOrEqualToAssertion.java | 5 ++-- .../rest/yaml/section/MatchAssertion.java | 5 ++-- .../test/rest/yaml/section/SetSection.java | 13 ++++++++++- 23 files changed, 103 insertions(+), 32 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index aed60658a76..2e29721f06e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -297,7 +297,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { if (!testCandidate.getSetupSection().isEmpty()) { logger.debug("start setup test [{}]", testCandidate.getTestPath()); for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) { - doSection.execute(restTestExecutionContext); + executeSection(doSection); } logger.debug("end setup test [{}]", testCandidate.getTestPath()); } @@ -306,14 +306,31 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { try { for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) { - executableSection.execute(restTestExecutionContext); + executeSection(executableSection); } } finally { logger.debug("start teardown test [{}]", testCandidate.getTestPath()); for (DoSection doSection : testCandidate.getTeardownSection().getDoSections()) { - doSection.execute(restTestExecutionContext); + executeSection(doSection); } logger.debug("end teardown test [{}]", testCandidate.getTestPath()); } } + + /** + * Execute an {@link ExecutableSection}, careful to log its place of origin on failure. 
+ */ + private void executeSection(ExecutableSection executableSection) { + try { + executableSection.execute(restTestExecutionContext); + } catch (Exception e) { + throw new RuntimeException(errorMessage(executableSection, e), e); + } catch (AssertionError e) { + throw new AssertionError(errorMessage(executableSection, e), e); + } + } + + private String errorMessage(ExecutableSection executableSection, Throwable t) { + return "Failure at [" + testCandidate.getSuitePath() + ":" + executableSection.getLocation().lineNumber + "]: " + t.getMessage(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java index f56630e4b6b..eda0f728f93 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java @@ -41,7 +41,7 @@ public class DoSectionParser implements ClientYamlTestFragmentParser String currentFieldName = null; XContentParser.Token token; - DoSection doSection = new DoSection(); + DoSection doSection = new DoSection(parseContext.parser().getTokenLocation()); ApiCallSection apiCallSection = null; Map headers = new HashMap<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanEqualToParser.java index 1ed71075970..65a46d139bd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanEqualToParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanEqualToParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest.yaml.parser; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentLocation; import 
org.elasticsearch.test.rest.yaml.section.GreaterThanEqualToAssertion; import java.io.IOException; @@ -31,11 +32,12 @@ public class GreaterThanEqualToParser implements ClientYamlTestFragmentParser stringObjectTuple = parseContext.parseTuple(); if (! (stringObjectTuple.v2() instanceof Comparable) ) { throw new ClientYamlTestParseException("gte section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName()); } - return new GreaterThanEqualToAssertion(stringObjectTuple.v1(), stringObjectTuple.v2()); + return new GreaterThanEqualToAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanParser.java index ca76d486ab4..f6c53272067 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/GreaterThanParser.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest.yaml.parser; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.test.rest.yaml.section.GreaterThanAssertion; import java.io.IOException; @@ -30,11 +31,12 @@ public class GreaterThanParser implements ClientYamlTestFragmentParser stringObjectTuple = parseContext.parseTuple(); if (! 
(stringObjectTuple.v2() instanceof Comparable) ) { throw new ClientYamlTestParseException("gt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName()); } - return new GreaterThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2()); + return new GreaterThanAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsFalseParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsFalseParser.java index 8a41df99002..3d9593ce290 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsFalseParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/IsFalseParser.java @@ -29,6 +29,6 @@ public class IsFalseParser implements ClientYamlTestFragmentParser stringObjectTuple = parseContext.parseTuple(); assert stringObjectTuple.v2() != null; int value; @@ -43,6 +45,6 @@ public class LengthParser implements ClientYamlTestFragmentParser stringObjectTuple = parseContext.parseTuple(); if (! 
(stringObjectTuple.v2() instanceof Comparable) ) { throw new ClientYamlTestParseException("lte section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName()); } - return new LessThanOrEqualToAssertion(stringObjectTuple.v1(), stringObjectTuple.v2()); + return new LessThanOrEqualToAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/LessThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/LessThanParser.java index f244bd68f93..0caf12ea91a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/LessThanParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/LessThanParser.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest.yaml.parser; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.test.rest.yaml.section.LessThanAssertion; import java.io.IOException; @@ -30,11 +31,12 @@ public class LessThanParser implements ClientYamlTestFragmentParser stringObjectTuple = parseContext.parseTuple(); if (! 
(stringObjectTuple.v2() instanceof Comparable) ) { throw new ClientYamlTestParseException("lt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName()); } - return new LessThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2()); + return new LessThanAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java index 35f3fc160bf..f456c32d092 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/MatchParser.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest.yaml.parser; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.test.rest.yaml.section.MatchAssertion; import java.io.IOException; @@ -30,7 +31,8 @@ public class MatchParser implements ClientYamlTestFragmentParser @Override public MatchAssertion parse(ClientYamlTestSuiteParseContext parseContext) throws IOException, ClientYamlTestParseException { + XContentLocation location = parseContext.parser().getTokenLocation(); Tuple stringObjectTuple = parseContext.parseTuple(); - return new MatchAssertion(stringObjectTuple.v1(), stringObjectTuple.v2()); + return new MatchAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SetSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SetSectionParser.java index 2686593e10a..c7797e42d57 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SetSectionParser.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SetSectionParser.java @@ -36,7 +36,7 @@ public class SetSectionParser implements ClientYamlTestFragmentParser stash = new HashMap<>(); + private final Map stash = new HashMap<>(); + private final XContentLocation location; + + public SetSection(XContentLocation location) { + this.location = location; + } public void addSet(String responseField, String stashedField) { stash.put(responseField, stashedField); @@ -42,6 +48,11 @@ public class SetSection implements ExecutableSection { return stash; } + @Override + public XContentLocation getLocation() { + return location; + } + @Override public void execute(ClientYamlTestExecutionContext executionContext) throws IOException { for (Map.Entry entry : stash.entrySet()) { From 4805250ecff95d2fc74c5f2fb5a0babed6278154 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 2 Aug 2016 15:12:19 +0200 Subject: [PATCH 016/103] Throw ParsingException if a query is wrapped in an array Our parsing code accepted up until now queries in the following form (note that the query starts with `[`: ``` { "bool" : [ { "must" : [] } ] } ``` This would lead to a null pointer exception as most parsers assume that the field name ("must" in this example) is the first thing that can be found in a query if its json is valid, hence always non null while parsing. 
Truth is that the additional array layer doesn't make the json invalid, hence the following code fragment would cause NPE within ParseField, because null gets passed to `parseContext.isDeprecatedSetting`: ``` if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (parseContext.isDeprecatedSetting(currentFieldName)) { // skip } else if (token == XContentParser.Token.START_OBJECT) { ``` We could add null checks in each of our parsers in lots of places, but we rely on `currentFieldName` being non null in all of our parsers, and we should consider it a bug when these unexpected situations are not caught explicitly. It would be best to find a way to prevent such queries altogether without changing all of our parsers. The reason why such a query goes through is that we've been allowing a query to start with either `[` or `{`. The only reason I found is that we accept `match_all : []`. This seems like an undocumented corner case that we could drop support for. Then we can be stricter and accept only `{` as start token of a query. That way the only next token that the parser can encounter if the json is valid (otherwise the json parser would barf earlier) is actually a field_name, hence the assumption that all our parser makes hold. 
The downside of this is simply dropping support for `match_all : []` Relates to #12887 --- .../org/elasticsearch/common/ParseField.java | 2 + .../index/query/QueryParseContext.java | 6 +-- .../query/MatchAllQueryBuilderTests.java | 12 ------ .../index/query/QueryParseContextTests.java | 2 +- .../test/AbstractQueryTestCase.java | 39 +++++++++++++++++++ 5 files changed, 45 insertions(+), 16 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/ParseField.java b/core/src/main/java/org/elasticsearch/common/ParseField.java index ed00394640e..7121be7d1d8 100644 --- a/core/src/main/java/org/elasticsearch/common/ParseField.java +++ b/core/src/main/java/org/elasticsearch/common/ParseField.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.logging.Loggers; import java.util.Collections; import java.util.HashSet; +import java.util.Objects; import java.util.Set; /** @@ -108,6 +109,7 @@ public class ParseField { * names for this {@link ParseField}. */ boolean match(String fieldName, boolean strict) { + Objects.requireNonNull(fieldName, "fieldName cannot be null"); // if this parse field has not been completely deprecated then try to // match the preferred name if (allReplacedWith == null && fieldName.equals(name)) { diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index daf0f6838b5..478bcbc51d4 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -109,13 +109,13 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { String queryName = parser.currentName(); // move to the next START_OBJECT token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) { - throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, no start_object after query 
name"); + if (token != XContentParser.Token.START_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] query malformed, no start_object after query name"); } @SuppressWarnings("unchecked") Optional result = (Optional) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher, parser.getTokenLocation()).fromXContent(this); - if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) { + if (parser.currentToken() == XContentParser.Token.END_OBJECT) { // if we are at END_OBJECT, move to the next one... parser.nextToken(); } diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java index ef8a4ffc28d..9195fc83ecc 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java @@ -24,8 +24,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import static org.hamcrest.CoreMatchers.instanceOf; @@ -36,16 +34,6 @@ public class MatchAllQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { - Map alternateVersions = new HashMap<>(); - String queryAsString = "{\n" + - " \"match_all\": []\n" + - "}"; - alternateVersions.put(queryAsString, new MatchAllQueryBuilder()); - return alternateVersions; - } - @Override protected void doAssertLuceneQuery(MatchAllQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, instanceOf(MatchAllDocsQuery.class)); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java index e6b55b0ec59..5568d2fa5a7 100644 --- 
a/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java @@ -113,7 +113,7 @@ public class QueryParseContextTests extends ESTestCase { try (XContentParser parser = JsonXContent.jsonXContent.createParser(source)) { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT); ParsingException exception = expectThrows(ParsingException.class, () -> context.parseInnerQueryBuilder()); - assertEquals("[_na] query malformed, no start_object after query name", exception.getMessage()); + assertEquals("[foo] query malformed, no start_object after query name", exception.getMessage()); } source = "{ \"foo\" : {} }"; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index ad2f6974607..215bfe5f18b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -122,6 +122,7 @@ import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -313,6 +314,44 @@ public abstract class AbstractQueryTestCase> } } + /** + * Test that wraps the randomly generated query into an array as follows: { "query_name" : [{}]} + * This causes unexpected situations in parser code that may not be handled properly. 
+ */ + public void testQueryWrappedInArray() throws IOException { + QB queryBuilder = createTestQueryBuilder(); + String validQuery = queryBuilder.toString(); + String queryName = queryBuilder.getName(); + int i = validQuery.indexOf("\"" + queryName + "\""); + assertThat(i, greaterThan(0)); + + int insertionPosition; + for (insertionPosition = i; insertionPosition < validQuery.length(); insertionPosition++) { + if (validQuery.charAt(insertionPosition) == ':') { + break; + } + } + insertionPosition++; + + int endArrayPosition; + for (endArrayPosition = validQuery.length() - 1; endArrayPosition >= 0; endArrayPosition--) { + if (validQuery.charAt(endArrayPosition) == '}') { + break; + } + } + + String testQuery = validQuery.substring(0, insertionPosition) + "[" + + validQuery.substring(insertionPosition, endArrayPosition) + "]" + + validQuery.substring(endArrayPosition, validQuery.length()); + + try { + parseQuery(testQuery); + fail("some parsing exception expected for query: " + testQuery); + } catch (ParsingException e) { + assertEquals("[" + queryName + "] query malformed, no start_object after query name", e.getMessage()); + } + } + /** * Returns alternate string representation of the query that need to be tested as they are never used as output * of {@link QueryBuilder#toXContent(XContentBuilder, ToXContent.Params)}. By default there are no alternate versions. 
From fa3420c2a529c1863af1d07e0460f98a461a3e97 Mon Sep 17 00:00:00 2001 From: Mary Date: Tue, 2 Aug 2016 19:10:13 -0500 Subject: [PATCH 017/103] Update term-level-queries.asciidoc Typo fix --- docs/reference/query-dsl/term-level-queries.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index 9c28a727b33..a6aae489668 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -32,7 +32,7 @@ The queries in this group are: <>:: - Find documents where the field specified contains terms which being with + Find documents where the field specified contains terms which begin with the exact prefix specified. <>:: From eb6da69e9fdede45dd3669805e08f8549194088b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 3 Aug 2016 15:23:34 -0400 Subject: [PATCH 018/103] Explicitly tell Netty to not use unsafe With the security permissions that we grant to Netty, Netty can not access unsafe (because it relies on having the runtime permission accessDeclaredMembers and the reflect permission suppressAccessChecks). Instead, we should just explicitly tell Netty to not use unsafe. This commit adds a flag to the default jvm.options to tell Netty to not look for unsafe. 
Relates #19786 --- distribution/src/main/resources/config/jvm.options | 3 +++ 1 file changed, 3 insertions(+) diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options index fecb7e00493..2feba025509 100644 --- a/distribution/src/main/resources/config/jvm.options +++ b/distribution/src/main/resources/config/jvm.options @@ -59,6 +59,9 @@ # use our provided JNA always versus the system one -Djna.nosys=true +# flag to explicitly tell Netty to not use unsafe +-Dio.netty.noUnsafe=true + ## heap dumps # generate a heap dump when an allocation from the Java heap fails From e249ad8dfe6568e82661b81fa690870c9d76da6d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 3 Aug 2016 15:01:19 -0400 Subject: [PATCH 019/103] Fix loggerUsageCheck after clean The `loggerUsageCheck` can only run on directories that exist. It was checking whether or not the directories exist before they were built and then deciding to do no work. But only if you are building in a cleaned environment which CI does, but people rarely do locally. 
--- .../gradle/precommit/LoggerUsageTask.groovy | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy index b280a74db58..01ec6f7f5d3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy @@ -38,7 +38,7 @@ public class LoggerUsageTask extends LoggedExec { private FileCollection classpath; - private List classDirectories; + private FileCollection classDirectories; public LoggerUsageTask() { project.afterEvaluate { @@ -46,15 +46,25 @@ public class LoggerUsageTask extends LoggedExec { description = "Runs LoggerUsageCheck on ${classDirectories}" executable = new File(project.javaHome, 'bin/java') if (classDirectories == null) { - classDirectories = [] - if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) { - classDirectories += [project.sourceSets.main.output.classesDir] + // Default to main and test class files + List files = [] + // But only if the source sets that will make them exist + if (project.sourceSets.findByName("main")) { + files.add(project.sourceSets.main.output.classesDir) dependsOn project.tasks.classes } - if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) { - classDirectories += [project.sourceSets.test.output.classesDir] + if (project.sourceSets.findByName("test")) { + files.add(project.sourceSets.test.output.classesDir) dependsOn project.tasks.testClasses } + /* In an extra twist, it isn't good enough that the source set + * exists. Empty source sets won't make a classes directory + * which will cause the check to fail. We have to filter the + * empty directories out manually. 
This filter is done right + * before the actual logger usage check giving the rest of the + * build the opportunity to actually build the directory. + */ + classDirectories = project.files(files).filter { it.exists() } } doFirst({ args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker') @@ -79,11 +89,11 @@ public class LoggerUsageTask extends LoggedExec { } @InputFiles - List getClassDirectories() { + FileCollection getClassDirectories() { return classDirectories } - void setClassDirectories(List classDirectories) { + void setClassDirectories(FileCollection classDirectories) { this.classDirectories = classDirectories } From be87d50f32d0ad59bed4cf98bd66e262a085b7dd Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Wed, 3 Aug 2016 16:24:24 -0400 Subject: [PATCH 020/103] Fixes CreateIndexIT test that assumes an index create propogated before calling delete. --- .../action/admin/indices/create/CreateIndexIT.java | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 57adec340ac..ba497ffca33 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -481,29 +481,26 @@ public class CreateIndexIT extends ESIntegTestCase { * This test ensures that index creation adheres to the {@link IndexMetaData#SETTING_WAIT_FOR_ACTIVE_SHARDS}. 
*/ public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception { - final String indexName = "test"; final int numReplicas = internalCluster().numDataNodes(); Settings settings = Settings.builder() .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas)) .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas) .build(); - assertAcked(client().admin().indices().prepareCreate(indexName).setSettings(settings).get()); - assertAcked(client().admin().indices().prepareDelete(indexName)); + assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); // all should fail settings = Settings.builder() .put(settings) .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "all") .build(); - assertFalse(client().admin().indices().prepareCreate(indexName).setSettings(settings).setTimeout("100ms").get().isShardsAcked()); - assertAcked(client().admin().indices().prepareDelete(indexName)); + assertFalse(client().admin().indices().prepareCreate("test-idx-2").setSettings(settings).setTimeout("100ms").get().isShardsAcked()); // the numeric equivalent of all should also fail settings = Settings.builder() .put(settings) .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas + 1)) .build(); - assertFalse(client().admin().indices().prepareCreate(indexName).setSettings(settings).setTimeout("100ms").get().isShardsAcked()); + assertFalse(client().admin().indices().prepareCreate("test-idx-3").setSettings(settings).setTimeout("100ms").get().isShardsAcked()); } } From 3be1e7ec35df78c3e697e77ff30281f4ba9d5e72 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 3 Aug 2016 18:40:17 -0400 Subject: [PATCH 021/103] CONSOLify the completion suggester docs (#19758) * CONSOLEify search/suggesters/completion * CONSOLEify context suggester docs --- .../suggesters/completion-suggest.asciidoc | 68 +++++-- .../suggesters/context-suggest.asciidoc 
| 172 ++++++++---------- 2 files changed, 126 insertions(+), 114 deletions(-) diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index c40b7e4d3f3..d2d00948ec0 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -24,18 +24,24 @@ which indexes the field values for fast completions. [source,js] -------------------------------------------------- -PUT music/song/_mapping +PUT music { - "song" : { - "properties" : { - ... - "suggest" : { - "type" : "completion" + "mappings": { + "song" : { + "properties" : { + "suggest" : { + "type" : "completion" + }, + "title" : { + "type": "keyword" + } } } } } -------------------------------------------------- +// CONSOLE +// TESTSETUP Mapping supports the following parameters: @@ -80,7 +86,7 @@ the suggestions will be scored. Indexing a suggestion is as follows: [source,js] -------------------------------------------------- -PUT music/song/1?refresh=true +PUT music/song/1?refresh { "suggest" : { "input": [ "Nevermind", "Nirvana" ], @@ -88,6 +94,8 @@ PUT music/song/1?refresh=true } } -------------------------------------------------- +// CONSOLE +// TEST The following parameters are supported: @@ -104,7 +112,7 @@ You can index multiple suggestions for a document as follows: [source,js] -------------------------------------------------- -PUT music/song/1?refresh=true +PUT music/song/1?refresh { "suggest" : [ { @@ -118,16 +126,21 @@ PUT music/song/1?refresh=true ] } -------------------------------------------------- +// CONSOLE +// TEST[continued] You can use the following shorthand form. Note that you can not specify a weight with suggestion(s). 
[source,js] -------------------------------------------------- +PUT music/song/1?refresh { "suggest" : [ "Nevermind", "Nirvana" ] } -------------------------------------------------- +// CONSOLE +// TEST[continued] [[querying]] ==== Querying @@ -135,20 +148,27 @@ a weight with suggestion(s). Suggesting works as usual, except that you have to specify the suggest type as `completion`. Suggestions are near real-time, which means new suggestions can be made visible by <> and -documents once deleted are never shown. +documents once deleted are never shown. This request: [source,js] -------------------------------------------------- POST music/_suggest?pretty { "song-suggest" : { - "prefix" : "n", + "prefix" : "nir", "completion" : { "field" : "suggest" } } } +-------------------------------------------------- +// CONSOLE +// TEST[continued] +returns this response: + +[source,js] +-------------------------------------------------- { "_shards" : { "total" : 5, @@ -156,16 +176,17 @@ POST music/_suggest?pretty "failed" : 0 }, "song-suggest" : [ { - "text" : "n", + "text" : "nir", "offset" : 0, - "length" : 1, + "length" : 3, "options" : [ { "text" : "Nirvana", - "score" : 34.0 + "score" : 1.0 } ] } ] } -------------------------------------------------- +// TESTRESPONSE The configured weight for a suggestion is returned as `score`. The `text` field uses the `input` of your indexed suggestion. 
@@ -179,12 +200,13 @@ as follows: [source,js] -------------------------------------------------- -POST music/song +POST music/song?refresh { "suggest" : "Nirvana", "title" : "Nevermind" } -------------------------------------------------- +// CONSOLE You can get the "title" as part of the suggestion payload by specifying it as a `payload`: @@ -196,12 +218,19 @@ POST music/_suggest?pretty "song-suggest" : { "prefix" : "n", "completion" : { - "field" : "suggest" + "field" : "suggest", "payload" : [ "title" ] <1> } } } +-------------------------------------------------- +// CONSOLE +// TEST[continued] +returns: + +[source,js] +-------------------------------------------------- { "_shards" : { "total" : 5, @@ -214,7 +243,7 @@ POST music/_suggest?pretty "length" : 1, "options" : [ { "text" : "Nirvana", - "score" : 34.0, + "score" : 1.0, "payload" : { "title" : [ "Nevermind" ] } @@ -222,6 +251,7 @@ POST music/_suggest?pretty } ] } -------------------------------------------------- +// TESTRESPONSE <1> The fields to be returned as part of each suggestion payload. The basic completion suggester query supports the following parameters: @@ -250,7 +280,7 @@ you can have a typo in your search and still get results back. POST music/_suggest?pretty { "song-suggest" : { - "prefix" : "n", + "prefix" : "nor", "completion" : { "field" : "suggest", "fuzzy" : { @@ -260,6 +290,7 @@ POST music/_suggest?pretty } } -------------------------------------------------- +// CONSOLE Suggestions that share the longest prefix to the query `prefix` will be scored higher. @@ -308,11 +339,12 @@ POST music/_suggest?pretty "song-suggest" : { "regex" : "n[ever|i]r", "completion" : { - "field" : "suggest", + "field" : "suggest" } } } -------------------------------------------------- +// CONSOLE The regex query can take specific regex parameters. 
The following parameters are supported: diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index ddec2bb193d..8e834bf5bb0 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -13,38 +13,73 @@ Every context mapping has a unique name and a type. There are two types: `catego and `geo`. Context mappings are configured under the `contexts` parameter in the field mapping. -The following defines two context mappings for a completion field: +The following defines types, each with two context mappings for a completion +field: [source,js] -------------------------------------------------- -PUT place/shops/_mapping +PUT place { - "shops" : { - "properties" : { - ... - "suggest" : { - "type" : "completion", - "contexts": [ - { <1> - "name": "place_type", - "type": "category", - "path": "cat" - }, - { <2> - "name": "location" - "type": "geo", - "path": "loc" - } - ] + "mappings": { + "shops" : { + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <1> + "name": "place_type", + "type": "category", + "path": "cat" + }, + { <2> + "name": "location", + "type": "geo", + "precision": 4 + } + ] + } + } + } + } +} +PUT place_path_category +{ + "mappings": { + "shops" : { + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <3> + "name": "place_type", + "type": "category", + "path": "cat" + }, + { <4> + "name": "location", + "type": "geo", + "precision": 4, + "path": "loc" + } + ] + }, + "loc": { + "type": "geo_point" + } } } } } -------------------------------------------------- -<1> Defines a `category` context named 'place_type', which will index values from field 'cat'. - See <> -<2> Defines a `geo` context named 'location', which will index values from field 'loc'. 
- See <> +// TESTSETUP +<1> Defines a `category` context named 'place_type' where the categories must be + sent with the suggestions. +<2> Defines a `geo` context named 'location' where the categories must be sent + with the suggestions. +<1> Defines a `category` context named 'place_type' where the categories are + read from the `cat` field. +<2> Defines a `geo` context named 'location' where the categories are read from + the `loc` field. NOTE: Adding context mappings increases the index size for completion field. The completion index is entirely heap resident, you can monitor the completion field index size using <>. @@ -56,47 +91,14 @@ is entirely heap resident, you can monitor the completion field index size using The `category` context allows you to associate one or more categories with suggestions at index time. At query time, suggestions can be filtered and boosted by their associated categories. -[float] -===== Category Mapping - -A `category` context mapping, where categories are provided explicitly with suggestions -can be defined as follows: - -[source,js] --------------------------------------------------- -"contexts": [ - { - "name": "cat_context", - "type": "category", - } -] --------------------------------------------------- - -Alternatively, A `category` context mapping that references another field within a document -can be defined as follows: - -[source,js] --------------------------------------------------- -"contexts": [ - { - "name": "cat_context", - "type": "category", - "path": "cat_field" - } -] --------------------------------------------------- - -[float] -===== Indexing category contexts - -Category contexts can be specified explicitly when indexing suggestions. If a suggestion has -multiple categories, the suggestion will be indexed for each category: +The mappings are set up like the `place_type` fields above. 
If `path` is defined +then the categories are read from that path in the document, otherwise they must +be sent in the suggest field like this: [source,js] -------------------------------------------------- PUT place/shops/1 { - ... "suggest": { "input": ["timmy's", "starbucks", "dunkin donuts"], "contexts": { @@ -105,36 +107,21 @@ PUT place/shops/1 } } -------------------------------------------------- - +// CONSOLE <1> These suggestions will be associated with 'cafe' and 'food' category. -Category contexts can also be referenced from another indexed field in the document via -the `path` parameter in the field mapping: +If the mapping had a `path` then the following index request would be enough to +add the categories: [source,js] -------------------------------------------------- -"contexts": [ - { - "name": "cat_context", - "type": "category", - "path": "cat" - } -] --------------------------------------------------- - -With the above mapping, the following will index the suggestions, treating the values of the -'cat' field as category contexts: - -[source,js] --------------------------------------------------- -PUT place/shops/1 +PUT place_path_category/shops/1 { - ... "suggest": ["timmy's", "starbucks", "dunkin donuts"], "cat": ["cafe", "food"] <1> } -------------------------------------------------- - +// CONSOLE <1> These suggestions will be associated with 'cafe' and 'food' category. NOTE: If context mapping references another field and the categories @@ -164,6 +151,8 @@ POST place/_suggest?pretty } } -------------------------------------------------- +// CONSOLE +// TEST[continued] NOTE: When no categories are provided at query-time, all indexed documents are considered. 
Querying with no categories on a category enabled completion field should be avoided, as it @@ -192,6 +181,8 @@ POST place/_suggest?pretty } } -------------------------------------------------- +// CONSOLE +// TEST[continued] <1> The context query filter suggestions associated with categories 'cafe' and 'restaurants' and boosts the suggestions associated with 'restaurants' by a @@ -244,21 +235,6 @@ In addition to the `path` setting, `geo` context mapping accepts the following s NOTE: The index time `precision` setting sets the maximum geohash precision that can be used at query time. -The following defines a `geo` context mapping with an index time precision of `4` -indexing values from a geo point field 'pin': - -[source,js] --------------------------------------------------- -"contexts": [ - { - "name": "location" - "type": "geo", - "precision": 4, - "path": "pin", - } -] --------------------------------------------------- - [float] ===== Indexing geo contexts @@ -273,7 +249,7 @@ PUT place/shops/1 { "suggest": { "input": "timmy's", - "contexts": [ + "contexts": { "location": [ { "lat": 43.6624803, @@ -284,10 +260,11 @@ PUT place/shops/1 "lon": -79.3873227 } ] - ] + } } } -------------------------------------------------- +// CONSOLE [float] ===== Geo location Query @@ -315,6 +292,8 @@ POST place/_suggest } } -------------------------------------------------- +// CONSOLE +// TEST[continued] NOTE: When a location with a lower precision at query time is specified, all suggestions that fall within the area will be considered. 
@@ -351,6 +330,7 @@ POST place/_suggest?pretty } } -------------------------------------------------- +// TEST[continued] <1> The context query filters for suggestions that fall under the geo location represented by a geohash of '(43.662, -79.380)' with a precision of '2' and boosts suggestions From bcc5c7c07ab5555494dc904648d96a6fbe326ead Mon Sep 17 00:00:00 2001 From: debadair Date: Wed, 3 Aug 2016 17:20:00 -0700 Subject: [PATCH 022/103] Docs: Fixed callout error that broke the build. --- docs/reference/search/suggesters/context-suggest.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 8e834bf5bb0..fec167c7b84 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -76,9 +76,9 @@ PUT place_path_category sent with the suggestions. <2> Defines a `geo` context named 'location' where the categories must be sent with the suggestions. -<1> Defines a `category` context named 'place_type' where the categories are +<3> Defines a `category` context named 'place_type' where the categories are read from the `cat` field. -<2> Defines a `geo` context named 'location' where the categories are read from +<4> Defines a `geo` context named 'location' where the categories are read from the `loc` field. NOTE: Adding context mappings increases the index size for completion field. The completion index From 7d750d28117c93cbef6dcb84fa269ad418b1b134 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 3 Aug 2016 21:30:17 -0400 Subject: [PATCH 023/103] Increase Netty 3 REST test suite timeout This commit increases the Netty 3 REST test suite timeout to thirty minutes. This is to address these tests running slowly after increasing the number of nodes in the tests to two. 
This has surfaced that the tests are heavily impacted by excessive fsyncs from most tests using the default number of shards of five. --- .../http/netty3/Netty3ClientYamlTestSuiteIT.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java index 2fce8e3022d..9ed1df1cfed 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java @@ -21,13 +21,16 @@ package org.elasticsearch.http.netty3; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.lucene.util.TimeUnits; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException; import java.io.IOException; +// TODO: remove timeout after address slow test issue +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) public class Netty3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public Netty3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { @@ -38,4 +41,5 @@ public class Netty3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public static Iterable parameters() throws IOException, ClientYamlTestParseException { return ESClientYamlSuiteTestCase.createParameters(0, 1); } + } From 533412e36f9a855e91c0e676bc9dc9b691db6b84 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 3 Aug 2016 23:02:13 -0400 Subject: [PATCH 024/103] Improve cat thread pool API Today, when listing thread pools via the cat 
thread pool API, thread pools are listed in a column-delimited format. This is unfriendly to command-line tools, and inconsistent with other cat APIs. Instead, thread pools should be listed in a row-delimited format. Additionally, the cat thread pool API is limited to a fixed list of thread pools that excludes certain built-in thread pools as well as all custom thread pools. These thread pools should be available via the cat thread pool API. This commit improves the cat thread pool API by listing all thread pools (built-in or custom), and by listing them in a row-delimited format. Finally, for each node, the output thread pools are sorted by thread pool name. Relates #19721 --- .../rest/action/cat/RestThreadPoolAction.java | 233 ++++++------------ docs/reference/cat/thread_pool.asciidoc | 124 ++++++---- .../rest-api-spec/api/cat.thread_pool.json | 9 +- .../test/cat.thread_pool/10_basic.yaml | 137 +++------- 4 files changed, 173 insertions(+), 330 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 880995ec5da..dc8c2d773e3 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -30,9 +30,9 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.rest.RestChannel; @@ -47,75 +47,26 @@ import 
org.elasticsearch.threadpool.ThreadPoolStats; import java.util.Collections; import java.util.HashMap; -import java.util.LinkedHashSet; +import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.TreeMap; import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestThreadPoolAction extends AbstractCatAction { - private static final String[] SUPPORTED_NAMES = new String[]{ - ThreadPool.Names.BULK, - ThreadPool.Names.FLUSH, - ThreadPool.Names.GENERIC, - ThreadPool.Names.GET, - ThreadPool.Names.INDEX, - ThreadPool.Names.MANAGEMENT, - ThreadPool.Names.FORCE_MERGE, - ThreadPool.Names.REFRESH, - ThreadPool.Names.SEARCH, - ThreadPool.Names.SNAPSHOT, - ThreadPool.Names.WARMER - }; - - private static final String[] SUPPORTED_ALIASES = new String[]{ - "b", - "f", - "ge", - "g", - "i", - "ma", - "fm", - "r", - "s", - "sn", - "w" - }; - - static { - assert SUPPORTED_ALIASES.length == SUPPORTED_NAMES.length: "SUPPORTED_NAMES/ALIASES mismatch"; - } - - private static final String[] DEFAULT_THREAD_POOLS = new String[]{ - ThreadPool.Names.BULK, - ThreadPool.Names.INDEX, - ThreadPool.Names.SEARCH, - }; - - private static final Map ALIAS_TO_THREAD_POOL; - private static final Map THREAD_POOL_TO_ALIAS; - - static { - ALIAS_TO_THREAD_POOL = new HashMap<>(SUPPORTED_NAMES.length); - for (String supportedThreadPool : SUPPORTED_NAMES) { - ALIAS_TO_THREAD_POOL.put(supportedThreadPool.substring(0, 3), supportedThreadPool); - } - THREAD_POOL_TO_ALIAS = new HashMap<>(SUPPORTED_NAMES.length); - for (int i = 0; i < SUPPORTED_NAMES.length; i++) { - THREAD_POOL_TO_ALIAS.put(SUPPORTED_NAMES[i], SUPPORTED_ALIASES[i]); - } - } - @Inject public RestThreadPoolAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(GET, "/_cat/thread_pool", this); + controller.registerHandler(GET, "/_cat/thread_pool/{thread_pool_patterns}", this); } @Override protected void documentation(StringBuilder sb) { 
sb.append("/_cat/thread_pool\n"); + sb.append("/_cat/thread_pool/{thread_pools}"); } @Override @@ -149,96 +100,55 @@ public class RestThreadPoolAction extends AbstractCatAction { @Override protected Table getTableWithHeader(final RestRequest request) { - Table table = new Table(); + final Table table = new Table(); table.startHeaders(); - table.addCell("id", "default:false;alias:nodeId;desc:unique node id"); + table.addCell("node_name", "default:true;alias:nn;desc:node name"); + table.addCell("node_id", "default:false;alias:id;desc:persistent node id"); + table.addCell("ephemeral_node_id", "default:false;alias:eid;desc:ephemeral node id"); table.addCell("pid", "default:false;alias:p;desc:process id"); - table.addCell("host", "alias:h;desc:host name"); - table.addCell("ip", "alias:i;desc:ip address"); + table.addCell("host", "default:false;alias:h;desc:host name"); + table.addCell("ip", "default:false;alias:i;desc:ip address"); table.addCell("port", "default:false;alias:po;desc:bound transport port"); - - final String[] requestedPools = fetchSortedPools(request, DEFAULT_THREAD_POOLS); - for (String pool : SUPPORTED_NAMES) { - String poolAlias = THREAD_POOL_TO_ALIAS.get(pool); - boolean display = false; - for (String requestedPool : requestedPools) { - if (pool.equals(requestedPool)) { - display = true; - break; - } - } - - String defaultDisplayVal = Boolean.toString(display); - table.addCell( - pool + ".type", - "alias:" + poolAlias + "t;default:false;desc:" + pool + " thread pool type" - ); - table.addCell( - pool + ".active", - "alias:" + poolAlias + "a;default:" + defaultDisplayVal + ";text-align:right;desc:number of active " + pool + " threads" - ); - table.addCell( - pool + ".size", - "alias:" + poolAlias + "s;default:false;text-align:right;desc:number of " + pool + " threads" - ); - table.addCell( - pool + ".queue", - "alias:" + poolAlias + "q;default:" + defaultDisplayVal + ";text-align:right;desc:number of " + pool + " threads in queue" - ); - table.addCell( 
- pool + ".queueSize", - "alias:" + poolAlias + "qs;default:false;text-align:right;desc:maximum number of " + pool + " threads in queue" - ); - table.addCell( - pool + ".rejected", - "alias:" + poolAlias + "r;default:" + defaultDisplayVal + ";text-align:right;desc:number of rejected " + pool + " threads" - ); - table.addCell( - pool + ".largest", - "alias:" + poolAlias + "l;default:false;text-align:right;desc:highest number of seen active " + pool + " threads" - ); - table.addCell( - pool + ".completed", - "alias:" + poolAlias + "c;default:false;text-align:right;desc:number of completed " + pool + " threads" - ); - table.addCell( - pool + ".min", - "alias:" + poolAlias + "mi;default:false;text-align:right;desc:minimum number of " + pool + " threads" - ); - table.addCell( - pool + ".max", - "alias:" + poolAlias + "ma;default:false;text-align:right;desc:maximum number of " + pool + " threads" - ); - table.addCell( - pool + ".keepAlive", - "alias:" + poolAlias + "k;default:false;text-align:right;desc:" + pool + " thread keep alive time" - ); - } - + table.addCell("name", "default:true;alias:n;desc:thread pool name"); + table.addCell("type", "alias:t;default:false;desc:thread pool type"); + table.addCell("active", "alias:a;default:true;text-align:right;desc:number of active threads"); + table.addCell("size", "alias:s;default:false;text-align:right;desc:number of threads"); + table.addCell("queue", "alias:q;default:true;text-align:right;desc:number of tasks currently in queue"); + table.addCell("queue_size", "alias:qs;default:false;text-align:right;desc:maximum number of tasks permitted in queue"); + table.addCell("rejected", "alias:r;default:true;text-align:right;desc:number of rejected tasks"); + table.addCell("largest", "alias:l;default:false;text-align:right;desc:highest number of seen active threads"); + table.addCell("completed", "alias:c;default:false;text-align:right;desc:number of completed tasks"); + table.addCell("min", 
"alias:mi;default:false;text-align:right;desc:minimum number of threads"); + table.addCell("max", "alias:ma;default:false;text-align:right;desc:maximum number of threads"); + table.addCell("keep_alive", "alias:ka;default:false;text-align:right;desc:thread keep alive time"); table.endHeaders(); return table; } - private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) { - boolean fullId = req.paramAsBoolean("full_id", false); - DiscoveryNodes nodes = state.getState().nodes(); - Table table = getTableWithHeader(req); + final String[] threadPools = req.paramAsStringArray("thread_pool_patterns", new String[] { "*" }); + final DiscoveryNodes nodes = state.getState().nodes(); + final Table table = getTableWithHeader(req); - for (DiscoveryNode node : nodes) { - NodeInfo info = nodesInfo.getNodesMap().get(node.getId()); - NodeStats stats = nodesStats.getNodesMap().get(node.getId()); - table.startRow(); - - table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4)); - table.addCell(info == null ? 
null : info.getProcess().getId()); - table.addCell(node.getHostName()); - table.addCell(node.getHostAddress()); - if (node.getAddress() instanceof InetSocketTransportAddress) { - table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort()); - } else { - table.addCell("-"); + // collect all thread pool names that we see across the nodes + final Set candidates = new HashSet<>(); + for (final NodeStats nodeStats : nodesStats.getNodes()) { + for (final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { + candidates.add(threadPoolStats.getName()); } + } + + // collect all thread pool names that match the specified thread pool patterns + final Set included = new HashSet<>(); + for (final String candidate : candidates) { + if (Regex.simpleMatch(threadPools, candidate)) { + included.add(candidate); + } + } + + for (final DiscoveryNode node : nodes) { + final NodeInfo info = nodesInfo.getNodesMap().get(node.getId()); + final NodeStats stats = nodesStats.getNodesMap().get(node.getId()); final Map poolThreadStats; final Map poolThreadInfo; @@ -247,8 +157,9 @@ public class RestThreadPoolAction extends AbstractCatAction { poolThreadStats = Collections.emptyMap(); poolThreadInfo = Collections.emptyMap(); } else { - poolThreadStats = new HashMap<>(14); - poolThreadInfo = new HashMap<>(14); + // we use a sorted map to ensure that thread pools are sorted by name + poolThreadStats = new TreeMap<>(); + poolThreadInfo = new HashMap<>(); ThreadPoolStats threadPoolStats = stats.getThreadPool(); for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) { @@ -260,9 +171,25 @@ public class RestThreadPoolAction extends AbstractCatAction { } } } - for (String pool : SUPPORTED_NAMES) { - ThreadPoolStats.Stats poolStats = poolThreadStats.get(pool); - ThreadPool.Info poolInfo = poolThreadInfo.get(pool); + for (Map.Entry entry : poolThreadStats.entrySet()) { + + if (!included.contains(entry.getKey())) continue; + + table.startRow(); + + 
table.addCell(node.getName()); + table.addCell(node.getId()); + table.addCell(node.getEphemeralId()); + table.addCell(info == null ? null : info.getProcess().getId()); + table.addCell(node.getHostName()); + table.addCell(node.getHostAddress()); + if (node.getAddress() instanceof InetSocketTransportAddress) { + table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort()); + } else { + table.addCell("-"); + } + final ThreadPoolStats.Stats poolStats = entry.getValue(); + final ThreadPool.Info poolInfo = poolThreadInfo.get(entry.getKey()); Long maxQueueSize = null; String keepAlive = null; @@ -284,6 +211,7 @@ public class RestThreadPoolAction extends AbstractCatAction { } } + table.addCell(entry.getKey()); table.addCell(poolInfo == null ? null : poolInfo.getThreadPoolType().getType()); table.addCell(poolStats == null ? null : poolStats.getActive()); table.addCell(poolStats == null ? null : poolStats.getThreads()); @@ -295,34 +223,11 @@ public class RestThreadPoolAction extends AbstractCatAction { table.addCell(minThreads); table.addCell(maxThreads); table.addCell(keepAlive); - } - table.endRow(); + table.endRow(); + } } return table; } - - // The thread pool columns should always be in the same order. 
- private String[] fetchSortedPools(RestRequest request, String[] defaults) { - String[] headers = request.paramAsStringArray("h", null); - if (headers == null) { - return defaults; - } else { - Set requestedPools = new LinkedHashSet<>(headers.length); - for (String header : headers) { - int dotIndex = header.indexOf('.'); - if (dotIndex != -1) { - String headerPrefix = header.substring(0, dotIndex); - if (THREAD_POOL_TO_ALIAS.containsKey(headerPrefix)) { - requestedPools.add(headerPrefix); - } - } else if (ALIAS_TO_THREAD_POOL.containsKey(header)) { - requestedPools.add(ALIAS_TO_THREAD_POOL.get(header)); - } - - } - return requestedPools.toArray(new String[requestedPools.size()]); - } - } } diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 1d01989a429..300767c256b 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -2,106 +2,124 @@ == cat thread pool The `thread_pool` command shows cluster wide thread pool statistics per node. By default the active, queue and rejected -statistics are returned for the bulk, index and search thread pools. +statistics are returned for all thread pools. [source,sh] -------------------------------------------------- % curl 192.168.56.10:9200/_cat/thread_pool -host1 192.168.1.35 0 0 0 0 0 0 0 0 0 -host2 192.168.1.36 0 0 0 0 0 0 0 0 0 +0EWUhXe bulk 0 0 0 +0EWUhXe fetch_shard_started 0 0 0 +0EWUhXe fetch_shard_store 0 0 0 +0EWUhXe flush 0 0 0 +0EWUhXe force_merge 0 0 0 +0EWUhXe generic 0 0 0 +0EWUhXe get 0 0 0 +0EWUhXe index 0 0 0 +0EWUhXe listener 0 0 0 +0EWUhXe management 1 0 0 +0EWUhXe refresh 0 0 0 +0EWUhXe search 0 0 0 +0EWUhXe snapshot 0 0 0 +0EWUhXe warmer 0 0 0 -------------------------------------------------- -The first two columns contain the host and ip of a node. 
+The first column is the node name [source,sh] -------------------------------------------------- -host ip -host1 192.168.1.35 -host2 192.168.1.36 +node_name +0EWUhXe -------------------------------------------------- -The next three columns show the active queue and rejected statistics for the bulk thread pool. +The second column is the thread pool name +[source,sh] +-------------------------------------------------- +name +bulk +fetch_shard_started +fetch_shard_store +flush +force_merge +generic +get +index +listener +management +refresh +search +snapshot +warmer +-------------------------------------------------- + + +The next three columns show the active, queue, and rejected statistics for each thread pool [source,sh] -------------------------------------------------- -bulk.active bulk.queue bulk.rejected - 0 0 0 +active queue rejected + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 1 0 0 + 0 0 0 + 0 0 0 + 0 0 0 + 0 0 0 -------------------------------------------------- -The remaining columns show the active queue and rejected statistics of the index and search thread pool respectively. - -Also other statistics of different thread pools can be retrieved by using the `h` (header) parameter. +The cat thread pool API accepts a `thread_pool_patterns` URL parameter for specifying a +comma-separated list of regular expressions to match thread pool names. [source,sh] -------------------------------------------------- -% curl 'localhost:9200/_cat/thread_pool?v&h=id,host,suggest.active,suggest.rejected,suggest.completed' -host suggest.active suggest.rejected suggest.completed -host1 0 0 0 -host2 0 0 0 +% curl 'localhost:9200/_cat/thread_pool/generic?v&h=id,name,active,rejected,completed' +id name active rejected completed +0EWUhXeBQtaVGlexUeVwMg generic 0 0 70 -------------------------------------------------- -Here the host columns and the active, rejected and completed suggest thread pool statistic are displayed. 
The suggest -thread pool won't be displayed by default, so you always need to be specific about what statistic you want to display. - -[float] -=== Available Thread Pools - -Currently available <>: - -[cols="<,<,<",options="header"] -|======================================================================= -|Thread Pool |Alias |Description -|`bulk` |`b` |Thread pool used for <> operations -|`flush` |`f` |Thread pool used for <> operations -|`generic` |`ge` |Thread pool used for generic operations (e.g. background node discovery) -|`get` |`g` |Thread pool used for <> operations -|`index` |`i` |Thread pool used for <>/<> operations -|`management` |`ma` |Thread pool used for management of Elasticsearch (e.g. cluster management) -|`force_merge` |`fm` |Thread pool used for <> operations -|`refresh` |`r` |Thread pool used for <> operations -|`search` |`s` |Thread pool used for <>/<> operations -|`snapshot` |`sn` |Thread pool used for <> operations -|`suggest` |`su` |Thread pool used for <> operations -|`warmer` |`w` |Thread pool used for index warm-up operations -|======================================================================= - -The thread pool name (or alias) must be combined with a thread pool field below -to retrieve the requested information. +Here the `id` and `name` columns and the active, rejected and completed statistics for the `generic` thread pool are displayed. +All <> and custom thread pools are available. [float] ==== Thread Pool Fields For each thread pool, you can load details about it by using the field names -in the table below, either using the full field name (e.g. `bulk.active`) or -its alias (e.g. `sa` is equivalent to `search.active`). +in the table below.
[cols="<,<,<",options="header"] |======================================================================= |Field Name |Alias |Description -|`type` |`t` |The current (*) type of thread pool (`cached`, `fixed` or `scaling`) +|`type` |`t` |The current (*) type of thread pool (`fixed` or `scaling`) |`active` |`a` |The number of active threads in the current thread pool |`size` |`s` |The number of threads in the current thread pool |`queue` |`q` |The number of tasks in the queue for the current thread pool -|`queueSize` |`qs` |The maximum number of tasks in the queue for the current thread pool -|`rejected` |`r` |The number of rejected threads in the current thread pool +|`queue_size` |`qs` |The maximum number of tasks permitted in the queue for the current thread pool +|`rejected` |`r` |The number of tasks rejected by the thread pool executor |`largest` |`l` |The highest number of active threads in the current thread pool -|`completed` |`c` |The number of completed threads in the current thread pool +|`completed` |`c` |The number of tasks completed by the thread pool executor |`min` |`mi` |The configured minimum number of active threads allowed in the current thread pool |`max` |`ma` |The configured maximum number of active threads allowed in the current thread pool -|`keepAlive` |`k` |The configured keep alive time for threads +|`keep_alive` |`ka` |The configured keep alive time for threads |======================================================================= [float] === Other Fields In addition to details about each thread pool, it is also convenient to get an -understanding of where those thread pools reside. As such, you can request +understanding of where those thread pools reside. As such, you can request other details like the `ip` of the responding node(s).
[cols="<,<,<",options="header"] |======================================================================= |Field Name |Alias |Description -|`id` |`nodeId` |The unique node ID +|`node_id` |`id` |The unique node ID +|`ephemeral_node_id`|`eid` |The ephemeral node ID |`pid` |`p` |The process ID of the running node |`host` |`h` |The hostname for the current node |`ip` |`i` |The IP address for the current node diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json index b291804bbfc..d6ba5ebc6b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json @@ -4,7 +4,7 @@ "methods": ["GET"], "url": { "path": "/_cat/thread_pool", - "paths": ["/_cat/thread_pool"], + "paths": ["/_cat/thread_pool","/_cat/thread_pool/{thread_pools}"], "parts": { }, "params": {
Display column headers", "default": false }, - "full_id": { - "type": "boolean", - "description": "Enables displaying the complete node ids", - "default": false + "thread_pool_patterns": { + "type": "list", + "description": "A comma-separated list of regular-expressions to filter the thread pools in the output" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml index b79100664ab..a7d4e6c9901 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml @@ -6,17 +6,17 @@ - match: $body: | - / #host ip bulk.active bulk.queue bulk.rejected index.active index.queue index.rejected search.active search.queue search.rejected - ^ (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + / #node_name name active queue rejected + ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ - do: cat.thread_pool: v: true - match: - $body: | - /^ host \s+ ip \s+ bulk.active \s+ bulk.queue \s+ bulk.rejected \s+ index.active \s+ index.queue \s+ index.rejected \s+ search.active \s+ search.queue \s+ search.rejected \n - (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + $body: | + /^ node_name \s+ name \s+ active \s+ queue \s+ rejected \n + (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ - do: cat.thread_pool: @@ -25,135 +25,56 @@ - match: $body: | / #pid id host ip port - ^ (\d+ \s+ \S{4} \s+ \S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ (\d+|-) \n)+ $/ + (\d+ \s+ \S+ \s+ \S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ (\d+|-) \n)+ $/ - do: cat.thread_pool: - h: id,ba,fa,gea,ga,ia,maa,ma,fma - v: true - full_id: true - - - match: - $body: | - /^ id \s+ ba \s+ fa \s+ gea \s+ ga \s+ ia \s+ maa \s+ fma \n - (\S+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ 
\s+ \d+ \n)+ $/ - - - do: - cat.thread_pool: - h: id,bulk.type,bulk.active,bulk.size,bulk.queue,bulk.queueSize,bulk.rejected,bulk.largest,bulk.completed,bulk.min,bulk.max,bulk.keepAlive + thread_pool_patterns: bulk,management,flush,index,generic,force_merge + h: id,name,active v: true - match: $body: | - /^ id \s+ bulk.type \s+ bulk.active \s+ bulk.size \s+ bulk.queue \s+ bulk.queueSize \s+ bulk.rejected \s+ bulk.largest \s+ bulk.completed \s+ bulk.min \s+ bulk.max \s+ bulk.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ active \n + (\S+\s+ bulk \s+ \d+ \n + \S+\s+ flush \s+ \d+ \n + \S+\s+ force_merge \s+ \d+ \n + \S+\s+ generic \s+ \d+ \n + \S+\s+ index \s+ \d+ \n + \S+\s+ management \s+ \d+ \n)+ $/ - do: cat.thread_pool: - h: id,flush.type,flush.active,flush.size,flush.queue,flush.queueSize,flush.rejected,flush.largest,flush.completed,flush.min,flush.max,flush.keepAlive + thread_pool_patterns: bulk + h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive v: true - match: $body: | - /^ id \s+ flush.type \s+ flush.active \s+ flush.size \s+ flush.queue \s+ flush.queueSize \s+ flush.rejected \s+ flush.largest \s+ flush.completed \s+ flush.min \s+ flush.max \s+ flush.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? 
\s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n + (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: - h: id,generic.type,generic.active,generic.size,generic.queue,generic.queueSize,generic.rejected,generic.largest,generic.completed,generic.min,generic.max,generic.keepAlive + thread_pool_patterns: fetch* + h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive v: true - match: $body: | - /^ id \s+ generic.type \s+ generic.active \s+ generic.size \s+ generic.queue \s+ generic.queueSize \s+ generic.rejected \s+ generic.largest \s+ generic.completed \s+ generic.min \s+ generic.max \s+ generic.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,get.type,get.active,get.size,get.queue,get.queueSize,get.rejected,get.largest,get.completed,get.min,get.max,get.keepAlive - v: true - - - match: - $body: | - /^ id \s+ get.type \s+ get.active \s+ get.size \s+ get.queue \s+ get.queueSize \s+ get.rejected \s+ get.largest \s+ get.completed \s+ get.min \s+ get.max \s+ get.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,index.type,index.active,index.size,index.queue,index.queueSize,index.rejected,index.largest,index.completed,index.min,index.max,index.keepAlive - v: true - - - match: - $body: | - /^ id \s+ index.type \s+ index.active \s+ index.size \s+ index.queue \s+ index.queueSize \s+ index.rejected \s+ index.largest \s+ index.completed \s+ index.min \s+ index.max \s+ index.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? 
\s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,management.type,management.active,management.size,management.queue,management.queueSize,management.rejected,management.largest,management.completed,management.min,management.max,management.keepAlive - v: true - - - match: - $body: | - /^ id \s+ management.type \s+ management.active \s+ management.size \s+ management.queue \s+ management.queueSize \s+ management.rejected \s+ management.largest \s+ management.completed \s+ management.min \s+ management.max \s+ management.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,force_merge.type,force_merge.active,force_merge.size,force_merge.queue,force_merge.queueSize,force_merge.rejected,force_merge.largest,force_merge.completed,force_merge.min,force_merge.max,force_merge.keepAlive - v: true - - - match: - $body: | - /^ id \s+ force_merge.type \s+ force_merge.active \s+ force_merge.size \s+ force_merge.queue \s+ force_merge.queueSize \s+ force_merge.rejected \s+ force_merge.largest \s+ force_merge.completed \s+ force_merge.min \s+ force_merge.max \s+ force_merge.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,refresh.type,refresh.active,refresh.size,refresh.queue,refresh.queueSize,refresh.rejected,refresh.largest,refresh.completed,refresh.min,refresh.max,refresh.keepAlive - v: true - - - match: - $body: | - /^ id \s+ refresh.type \s+ refresh.active \s+ refresh.size \s+ refresh.queue \s+ refresh.queueSize \s+ refresh.rejected \s+ refresh.largest \s+ refresh.completed \s+ refresh.min \s+ refresh.max \s+ refresh.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? 
\s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,search.type,search.active,search.size,search.queue,search.queueSize,search.rejected,search.largest,search.completed,search.min,search.max,search.keepAlive - v: true - - - match: - $body: | - /^ id \s+ search.type \s+ search.active \s+ search.size \s+ search.queue \s+ search.queueSize \s+ search.rejected \s+ search.largest \s+ search.completed \s+ search.min \s+ search.max \s+ search.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,snapshot.type,snapshot.active,snapshot.size,snapshot.queue,snapshot.queueSize,snapshot.rejected,snapshot.largest,snapshot.completed,snapshot.min,snapshot.max,snapshot.keepAlive - v: true - - - match: - $body: | - /^ id \s+ snapshot.type \s+ snapshot.active \s+ snapshot.size \s+ snapshot.queue \s+ snapshot.queueSize \s+ snapshot.rejected \s+ snapshot.largest \s+ snapshot.completed \s+ snapshot.min \s+ snapshot.max \s+ snapshot.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - - do: - cat.thread_pool: - h: id,warmer.type,warmer.active,warmer.size,warmer.queue,warmer.queueSize,warmer.rejected,warmer.largest,warmer.completed,warmer.min,warmer.max,warmer.keepAlive - v: true - - - match: - $body: | - /^ id \s+ warmer.type \s+ warmer.active \s+ warmer.size \s+ warmer.queue \s+ warmer.queueSize \s+ warmer.rejected \s+ warmer.largest \s+ warmer.completed \s+ warmer.min \s+ warmer.max \s+ warmer.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? 
\s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n + (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n + \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: + thread_pool_patterns: bulk,index,search size: "" - match: $body: | - / #host ip bulk.active bulk.queue bulk.rejected index.active index.queue index.rejected search.active search.queue search.rejected - ^ (\S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + / #node_name name active queue rejected + ^ (\S+ \s+ bulk \s+ \d+ \s+ \d+ \s+ \d+ \n + \S+ \s+ index \s+ \d+ \s+ \d+ \s+ \d+ \n + \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ From 146f02183dcbc28d7ccb5e28586446fe4e728f8a Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 3 Aug 2016 21:16:45 +0200 Subject: [PATCH 025/103] [TEST] remove unused methods and fix some warnings in AbstractQueryTestCase Also fix line length issues --- .../resources/checkstyle_suppressions.xml | 1 - .../test/AbstractQueryTestCase.java | 112 ++++++++---------- 2 files changed, 52 insertions(+), 61 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 12f1d7b5a6e..64a3cb29a54 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1157,7 +1157,6 @@ - diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 215bfe5f18b..92acd702e3a 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -19,19 +19,6 @@ package org.elasticsearch.test; -import java.io.Closeable; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.ExecutionException; - import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.io.JsonStringEncoder; import org.apache.lucene.search.BoostQuery; @@ -58,7 +45,6 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Module; @@ -118,6 +104,19 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Before; +import java.io.Closeable; +import java.io.IOException; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ExecutionException; + import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -300,7 +299,8 @@ public abstract class AbstractQueryTestCase> assertThat(validQuery, containsString("{")); for (int insertionPosition = 0; insertionPosition < validQuery.length(); 
insertionPosition++) { if (validQuery.charAt(insertionPosition) == '{') { - String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " + validQuery.substring(insertionPosition) + "}"; + String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " + + validQuery.substring(insertionPosition) + "}"; try { parseQuery(testQuery); fail("some parsing exception expected for query: " + testQuery); @@ -363,11 +363,12 @@ public abstract class AbstractQueryTestCase> /** * Parses the query provided as string argument and compares it with the expected result provided as argument as a {@link QueryBuilder} */ - protected static final void assertParsedQuery(String queryAsString, QueryBuilder expectedQuery) throws IOException { + protected static void assertParsedQuery(String queryAsString, QueryBuilder expectedQuery) throws IOException { assertParsedQuery(queryAsString, expectedQuery, ParseFieldMatcher.STRICT); } - protected static final void assertParsedQuery(String queryAsString, QueryBuilder expectedQuery, ParseFieldMatcher matcher) throws IOException { + protected static void assertParsedQuery(String queryAsString, QueryBuilder expectedQuery, ParseFieldMatcher matcher) + throws IOException { QueryBuilder newQuery = parseQuery(queryAsString, matcher); assertNotSame(newQuery, expectedQuery); assertEquals(expectedQuery, newQuery); @@ -377,31 +378,32 @@ public abstract class AbstractQueryTestCase> /** * Parses the query provided as bytes argument and compares it with the expected result provided as argument as a {@link QueryBuilder} */ - protected static final void assertParsedQuery(BytesReference queryAsBytes, QueryBuilder expectedQuery) throws IOException { + private static void assertParsedQuery(BytesReference queryAsBytes, QueryBuilder expectedQuery) throws IOException { assertParsedQuery(queryAsBytes, expectedQuery, ParseFieldMatcher.STRICT); } - protected static final void assertParsedQuery(BytesReference queryAsBytes, 
QueryBuilder expectedQuery, ParseFieldMatcher matcher) throws IOException { + private static void assertParsedQuery(BytesReference queryAsBytes, QueryBuilder expectedQuery, ParseFieldMatcher matcher) + throws IOException { QueryBuilder newQuery = parseQuery(queryAsBytes, matcher); assertNotSame(newQuery, expectedQuery); assertEquals(expectedQuery, newQuery); assertEquals(expectedQuery.hashCode(), newQuery.hashCode()); } - protected static final QueryBuilder parseQuery(String queryAsString) throws IOException { + protected static QueryBuilder parseQuery(String queryAsString) throws IOException { return parseQuery(queryAsString, ParseFieldMatcher.STRICT); } - protected static final QueryBuilder parseQuery(String queryAsString, ParseFieldMatcher matcher) throws IOException { + protected static QueryBuilder parseQuery(String queryAsString, ParseFieldMatcher matcher) throws IOException { XContentParser parser = XContentFactory.xContent(queryAsString).createParser(queryAsString); return parseQuery(parser, matcher); } - protected static final QueryBuilder parseQuery(BytesReference queryAsBytes) throws IOException { + protected static QueryBuilder parseQuery(BytesReference queryAsBytes) throws IOException { return parseQuery(queryAsBytes, ParseFieldMatcher.STRICT); } - protected static final QueryBuilder parseQuery(BytesReference queryAsBytes, ParseFieldMatcher matcher) throws IOException { + protected static QueryBuilder parseQuery(BytesReference queryAsBytes, ParseFieldMatcher matcher) throws IOException { XContentParser parser = XContentFactory.xContent(queryAsBytes).createParser(queryAsBytes); return parseQuery(parser, matcher); } @@ -428,7 +430,8 @@ public abstract class AbstractQueryTestCase> Query firstLuceneQuery = rewriteQuery(firstQuery, context).toQuery(context); assertNotNull("toQuery should not return null", firstLuceneQuery); assertLuceneQuery(firstQuery, firstLuceneQuery, context); - SearchContext.removeCurrent(); // remove after assertLuceneQuery since the 
assertLuceneQuery impl might access the context as well + //remove after assertLuceneQuery since the assertLuceneQuery impl might access the context as well + SearchContext.removeCurrent(); assertTrue( "query is not equal to its copy after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery, firstQuery.equals(controlQuery)); @@ -449,7 +452,8 @@ public abstract class AbstractQueryTestCase> assertLuceneQuery(secondQuery, secondLuceneQuery, context); SearchContext.removeCurrent(); - assertEquals("two equivalent query builders lead to different lucene queries", rewrite(secondLuceneQuery), rewrite(firstLuceneQuery)); + assertEquals("two equivalent query builders lead to different lucene queries", + rewrite(secondLuceneQuery), rewrite(firstLuceneQuery)); if (supportsBoostAndQueryName()) { secondQuery.boost(firstQuery.boost() + 1f + randomFloat()); @@ -476,20 +480,21 @@ public abstract class AbstractQueryTestCase> } /** - * Few queries allow you to set the boost and queryName on the java api, although the corresponding parser doesn't parse them as they are not supported. - * This method allows to disable boost and queryName related tests for those queries. Those queries are easy to identify: their parsers - * don't parse `boost` and `_name` as they don't apply to the specific query: wrapper query and match_none + * Few queries allow you to set the boost and queryName on the java api, although the corresponding parser + * doesn't parse them as they are not supported. This method allows to disable boost and queryName related tests for those queries. + * Those queries are easy to identify: their parsers don't parse `boost` and `_name` as they don't apply to the specific query: + * wrapper query and match_none */ protected boolean supportsBoostAndQueryName() { return true; } /** - * Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} and {@link QueryShardContext}. 
- * Verifies that named queries and boost are properly handled and delegates to {@link #doAssertLuceneQuery(AbstractQueryBuilder, Query, QueryShardContext)} - * for query specific checks. + * Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} + * and {@link QueryShardContext}. Verifies that named queries and boost are properly handled and delegates to + * {@link #doAssertLuceneQuery(AbstractQueryBuilder, Query, QueryShardContext)} for query specific checks. */ - protected final void assertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException { + private void assertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException { if (queryBuilder.queryName() != null) { Query namedQuery = context.copyNamedQueries().get(queryBuilder.queryName()); assertThat(namedQuery, equalTo(query)); @@ -512,8 +517,8 @@ public abstract class AbstractQueryTestCase> } /** - * Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} and {@link QueryShardContext}. - * Contains the query specific checks to be implemented by subclasses. + * Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} + * and {@link QueryShardContext}. Contains the query specific checks to be implemented by subclasses. 
*/ protected abstract void doAssertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException; @@ -596,7 +601,7 @@ public abstract class AbstractQueryTestCase> //we use the streaming infra to create a copy of the query provided as argument @SuppressWarnings("unchecked") - protected QB copyQuery(QB query) throws IOException { + private QB copyQuery(QB query) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { output.writeNamedWriteable(query); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), serviceHolder.namedWriteableRegistry)) { @@ -616,8 +621,7 @@ public abstract class AbstractQueryTestCase> * @return a new {@link QueryParseContext} based on the base test index and queryParserService */ protected static QueryParseContext createParseContext(XContentParser parser, ParseFieldMatcher matcher) { - QueryParseContext queryParseContext = new QueryParseContext(serviceHolder.indicesQueriesRegistry, parser, matcher); - return queryParseContext; + return new QueryParseContext(serviceHolder.indicesQueriesRegistry, parser, matcher); } /** @@ -659,7 +663,7 @@ public abstract class AbstractQueryTestCase> int terms = randomIntBetween(0, 3); StringBuilder builder = new StringBuilder(); for (int i = 0; i < terms; i++) { - builder.append(randomAsciiOfLengthBetween(1, 10) + " "); + builder.append(randomAsciiOfLengthBetween(1, 10)).append(" "); } return builder.toString().trim(); } @@ -669,20 +673,12 @@ public abstract class AbstractQueryTestCase> */ protected String getRandomFieldName() { // if no type is set then return a random field name - if (serviceHolder.currentTypes == null || serviceHolder.currentTypes.length == 0 || randomBoolean()) { + if (serviceHolder.currentTypes.length == 0 || randomBoolean()) { return randomAsciiOfLengthBetween(1, 10); } return randomFrom(MAPPED_LEAF_FIELD_NAMES); } - /** - * Helper method to return a random field (mapped or unmapped) and a value - */ - 
protected Tuple getRandomFieldNameAndValue() { - String fieldName = getRandomFieldName(); - return new Tuple<>(fieldName, getRandomValueForFieldName(fieldName)); - } - /** * Helper method to return a random rewrite method */ @@ -700,7 +696,7 @@ public abstract class AbstractQueryTestCase> return rewrite; } - protected String[] getRandomTypes() { + private String[] getRandomTypes() { String[] types; if (serviceHolder.currentTypes.length > 0 && randomBoolean()) { int numberOfQueryTypes = randomIntBetween(1, serviceHolder.currentTypes.length); @@ -738,10 +734,6 @@ public abstract class AbstractQueryTestCase> } } - protected static boolean isNumericFieldName(String fieldName) { - return INT_FIELD_NAME.equals(fieldName) || DOUBLE_FIELD_NAME.equals(fieldName); - } - protected static String randomAnalyzer() { return randomFrom("simple", "standard", "keyword", "whitespace"); } @@ -858,7 +850,7 @@ public abstract class AbstractQueryTestCase> return query; } - static class ServiceHolder implements Closeable { + private static class ServiceHolder implements Closeable { private final Injector injector; private final IndicesQueriesRegistry indicesQueriesRegistry; @@ -875,7 +867,7 @@ public abstract class AbstractQueryTestCase> private final BitsetFilterCache bitsetFilterCache; private final ScriptService scriptService; - public ServiceHolder(Collection> plugins, AbstractQueryTestCase testCase) throws IOException { + ServiceHolder(Collection> plugins, AbstractQueryTestCase testCase) throws IOException { // we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually. indexVersionCreated = randomBoolean() ? 
Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT); @@ -890,8 +882,9 @@ public abstract class AbstractQueryTestCase> final ThreadPool threadPool = new ThreadPool(settings); index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - ClusterServiceUtils.setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder().put( - new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); + ClusterServiceUtils.setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData( + new MetaData.Builder().put(new IndexMetaData.Builder( + index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); Environment env = InternalSettingsPreparer.prepareEnvironment(settings, null); PluginsService pluginsService =new PluginsService(settings, env.modulesFile(), env.pluginsFile(), plugins); @@ -947,7 +940,7 @@ public abstract class AbstractQueryTestCase> similarityService = new SimilarityService(idxSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class); mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, - () -> createShardContext()); + this::createShardContext); IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { }); indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache, @@ -981,7 +974,8 @@ public abstract class AbstractQueryTestCase> ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); // also add mappings for two inner field in the object field mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\"," - + "\"properties\":{\"" + DATE_FIELD_NAME + 
"\":{\"type\":\"date\"},\"" + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), + + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), MapperService.MergeReason.MAPPING_UPDATE, false); currentTypes[i] = type; } @@ -1019,7 +1013,5 @@ public abstract class AbstractQueryTestCase> Environment environment = new Environment(settings); return ScriptModule.create(settings, environment, null, scriptPlugins); } - } - } From d327dd46b179e3ab909a7a1c4b481070d97bcdf8 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 4 Aug 2016 10:23:36 +0200 Subject: [PATCH 026/103] Recovery: don't log an error when listing an empty folder --- .../elasticsearch/indices/recovery/RecoveryTargetService.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java index 1a13504fb1a..b6fed0e9a46 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java @@ -174,6 +174,9 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve } else { metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata(); } + } catch (org.apache.lucene.index.IndexNotFoundException e) { + // happens on an empty folder. 
no need to log + metadataSnapshot = Store.MetadataSnapshot.EMPTY; } catch (IOException e) { logger.warn("error while listing local files, recover as if there are none", e); metadataSnapshot = Store.MetadataSnapshot.EMPTY; From ede78ad231240f8200ccac60a32ecec07045b963 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 4 Aug 2016 12:00:37 +0200 Subject: [PATCH 027/103] Use primary terms as authority to fail shards (#19715) A primary shard currently instructs the master to fail a replica shard that it fails to replicate writes to before acknowledging the writes to the client. To ensure that the primary instructing the master to fail the replica is still the current primary in the cluster state on the master, it submits not only the identity of the replica shard to fail to the master but also its own shard identity. This can be problematic however when the primary is relocating. After primary relocation handoff but before the primary relocation target is activated, the primary relocation target is replicating writes through the authority of the primary relocation source. This means that the primary relocation target should probably send the identity of the primary relocation source as authority. However, this is not good enough either, as primary shard activation and shard failure instructions can arrive out-of-order. This means that the relocation target would have to send both relocation source and target identity as authority. Fortunately, there is another concept in the cluster state that represents this joint authority, namely primary terms. The primary term is only increased on initial assignment or when a replica is promoted. It stays the same however when a primary relocates. This commit changes ShardStateAction to rely on primary terms for shard authority. 
It also changes the wire format to only transmit ShardId and allocation id of the shard to fail (instead of the full ShardRouting), so that the same action can be used in a subsequent PR to remove allocation ids from the active allocation set for which there exist no ShardRouting in the cluster anymore. Last but not least, this commit also makes AllocationService less lenient, requiring ShardRouting instances that are passed to its applyStartedShards and applyFailedShards methods to exist in the routing table. ShardStateAction, which is calling these methods, now has the responsibility to resolve the ShardRouting objects that are to be started / failed, and remove duplicates. --- .../replication/ReplicationOperation.java | 8 +- .../TransportReplicationAction.java | 6 +- .../action/shard/ShardStateAction.java | 294 ++++++++++-------- .../routing/IndexShardRoutingTable.java | 2 +- .../cluster/routing/RoutingNodes.java | 18 +- .../cluster/routing/RoutingTable.java | 25 +- .../cluster/routing/ShardRouting.java | 23 +- .../routing/allocation/AllocationService.java | 205 ++++++------ .../allocation/FailedRerouteAllocation.java | 14 +- .../routing/allocation/RoutingAllocation.java | 3 +- .../allocation/StartedRerouteAllocation.java | 7 +- .../decider/ThrottlingAllocationDecider.java | 4 +- .../gateway/GatewayAllocator.java | 4 +- .../cluster/IndicesClusterStateService.java | 4 +- .../ReplicationOperationTests.java | 10 +- .../TransportReplicationActionTests.java | 9 +- ...rdFailedClusterStateTaskExecutorTests.java | 138 ++++---- .../action/shard/ShardStateActionTests.java | 33 +- .../cluster/routing/AllocationIdTests.java | 3 +- .../cluster/routing/ShardRoutingTests.java | 3 +- .../allocation/FailedShardsRoutingTests.java | 43 ++- .../allocation/StartedShardsRoutingTests.java | 51 +-- .../DiskThresholdDeciderUnitTests.java | 4 +- .../DiscoveryWithServiceDisruptionsIT.java | 2 +- .../zen/NodeJoinControllerTests.java | 2 +- .../ESIndexLevelReplicationTestCase.java | 2 +- 
.../indices/cluster/ClusterStateChanges.java | 37 ++- 27 files changed, 476 insertions(+), 478 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index af6d8b030ca..070840ca2ef 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -136,7 +136,7 @@ public class ReplicationOperation< } if (shard.relocating() && shard.relocatingNodeId().equals(localNodeId) == false) { - performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest); + performOnReplica(shard.getTargetRelocatingShard(), replicaRequest); } } } @@ -167,7 +167,7 @@ public class ReplicationOperation< shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); logger.warn("[{}] {}", replicaException, shard.shardId(), message); - replicasProxy.failShard(shard, primary.routingEntry(), message, replicaException, + replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded() @@ -327,7 +327,7 @@ public class ReplicationOperation< /** * Fail the specified shard, removing it from the current set of active shards * @param replica shard to fail - * @param primary the primary shard that requested the failure + * @param primaryTerm the primary term of the primary shard when requesting the failure * @param message a (short) description of the reason * @param exception the original exception which caused the ReplicationOperation to request the shard to be failed * @param onSuccess a callback to call when the shard has been 
successfully removed from the active set. @@ -335,7 +335,7 @@ public class ReplicationOperation< * by the master. * @param onIgnoredFailure a callback to call when failing a shard has failed, but it that failure can be safely ignored and the */ - void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, Runnable onSuccess, + void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess, Consumer onPrimaryDemoted, Consumer onIgnoredFailure); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index a825cd1b9b5..8294ccfe0d6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -866,10 +866,10 @@ public abstract class TransportReplicationAction< } @Override - public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, + public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess, Consumer onFailure, Consumer onIgnoredFailure) { - shardStateAction.shardFailed( - replica, primary, message, exception, + shardStateAction.remoteShardFailed( + replica, primaryTerm, message, exception, new ShardStateAction.Listener() { @Override public void onSuccess() { diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index e6a6dea7def..99b19e31a4e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -29,9 +29,8 @@ import 
org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NotMasterException; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -64,11 +63,10 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Locale; -import java.util.Map; -import java.util.stream.Collectors; +import java.util.Set; public class ShardStateAction extends AbstractComponent { @@ -87,19 +85,19 @@ public class ShardStateAction extends AbstractComponent { this.clusterService = clusterService; this.threadPool = threadPool; - transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); - transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger)); + transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); + transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardEntry::new, 
ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger)); } - private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardRoutingEntry shardRoutingEntry, final Listener listener) { + private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) { DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode(); if (masterNode == null) { - logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting()); - waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener); + logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry); + waitForNewMasterAndRetry(actionName, observer, shardEntry, listener); } else { - logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode.getId(), shardRoutingEntry); + logger.debug("{} sending [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode.getId(), shardEntry); transportService.sendRequest(masterNode, - actionName, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + actionName, shardEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override public void handleResponse(TransportResponse.Empty response) { listener.onSuccess(); @@ -108,9 +106,9 @@ public class ShardStateAction extends AbstractComponent { @Override public void handleException(TransportException exp) { if (isMasterChannelException(exp)) { - waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener); + waitForNewMasterAndRetry(actionName, observer, shardEntry, listener); } else { - logger.warn("{} unexpected failure 
while sending request [{}] to [{}] for shard [{}]", exp, shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode, shardRoutingEntry); + logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry); listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp); } } @@ -129,34 +127,46 @@ public class ShardStateAction extends AbstractComponent { } /** - * Send a shard failed request to the master node to update the - * cluster state. - * @param shardRouting the shard to fail - * @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard) + * Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node. + * + * @param shardRouting the shard to fail + * @param primaryTerm the primary term associated with the primary shard that is failing the shard. * @param message the reason for the failure * @param failure the underlying cause of the failure * @param listener callback upon completion of the request */ - public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Exception failure, Listener listener) { + public void remoteShardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) { + assert primaryTerm > 0L : "primary term should be strictly positive"; + shardFailed(shardRouting, primaryTerm, message, failure, listener); + } + + /** + * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. 
+ */ + public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) { + shardFailed(shardRouting, 0L, message, failure, listener); + } + + private void shardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) { ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); - ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure); - sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener); + ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message, failure); + sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener); } // visible for testing - protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) { + protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardEntry shardEntry, Listener listener) { observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry); + logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state.prettyPrint(), shardEntry); } - sendShardAction(actionName, observer, shardRoutingEntry, listener); + sendShardAction(actionName, observer, shardEntry, listener); } @Override public void onClusterServiceClose() { - logger.warn("{} node closed while execution action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().shardId(), actionName, 
shardRoutingEntry.getShardRouting()); + logger.warn("{} node closed while executing action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry); listener.onFailure(new NodeClosedException(clusterService.localNode())); } @@ -168,7 +178,7 @@ public class ShardStateAction extends AbstractComponent { }, MasterNodeChangePredicate.INSTANCE); } - private static class ShardFailedTransportHandler implements TransportRequestHandler { private final ClusterService clusterService; private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; private final ESLogger logger; @@ -180,8 +190,8 @@ public class ShardStateAction extends AbstractComponent { } @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - logger.warn("{} received shard failed for {}", request.failure, request.shardRouting.shardId(), request); + public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception { + logger.warn("{} received shard failed for {}", request.failure, request.shardId, request); clusterService.submitStateUpdateTask( "shard-failed", request, @@ -190,22 +200,22 @@ public class ShardStateAction extends AbstractComponent { new ClusterStateTaskListener() { @Override public void onFailure(String source, Exception e) { - logger.error("{} unexpected failure while failing shard [{}]", e, request.shardRouting.shardId(), request.shardRouting); + logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request); try { channel.sendResponse(e); } catch (Exception channelException) { channelException.addSuppressed(e); - logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardRouting.shardId(), e, request.shardRouting); + logger.warn("{} failed to send failure [{}] while failing shard [{}]", 
channelException, request.shardId, e, request); } } @Override public void onNoLongerMaster(String source) { - logger.error("{} no longer master while failing shard [{}]", request.shardRouting.shardId(), request.shardRouting); + logger.error("{} no longer master while failing shard [{}]", request.shardId, request); try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { - logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardRouting.shardId(), request.shardRouting); + logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request); } } @@ -214,7 +224,7 @@ public class ShardStateAction extends AbstractComponent { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception channelException) { - logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardRouting.shardId(), request.shardRouting); + logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request); } } } @@ -222,63 +232,81 @@ public class ShardStateAction extends AbstractComponent { } } - static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor { + public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor { private final AllocationService allocationService; private final RoutingService routingService; private final ESLogger logger; - ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) { + public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) { this.allocationService = allocationService; this.routingService = routingService; this.logger = logger; } @Override - public String describeTasks(List tasks) { - return tasks.stream().map(entry -> 
entry.getShardRouting().toString()).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); - } + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + BatchResult.Builder batchResultBuilder = BatchResult.builder(); + List tasksToBeApplied = new ArrayList<>(); + List shardRoutingsToBeApplied = new ArrayList<>(); + Set seenShardRoutings = new HashSet<>(); // to prevent duplicates - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - BatchResult.Builder batchResultBuilder = BatchResult.builder(); + for (ShardEntry task : tasks) { + IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex()); + if (indexMetaData == null) { + // tasks that correspond to non-existent shards are marked as successful + logger.debug("{} ignoring shard failed task [{}] (unknown index {})", task.shardId, task, task.shardId.getIndex()); + batchResultBuilder.success(task); + } else { + // non-local requests + if (task.primaryTerm > 0) { + long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id()); + if (currentPrimaryTerm != task.primaryTerm) { + assert currentPrimaryTerm > task.primaryTerm : "received a primary term with a higher term than in the " + + "current cluster state (received [" + task.primaryTerm + "] but current is [" + currentPrimaryTerm + "])"; + logger.debug("{} failing shard failed task [{}] (primary term {} does not match current term {})", task.shardId, + task, task.primaryTerm, indexMetaData.primaryTerm(task.shardId.id())); + batchResultBuilder.failure(task, new NoLongerPrimaryShardException( + task.shardId, + "primary term [" + task.primaryTerm + "] did not match current primary term [" + currentPrimaryTerm + "]")); + continue; + } + } - // partition tasks into those that correspond to shards - // that exist versus do not exist - Map> partition = - tasks.stream().collect(Collectors.groupingBy(task -> validateTask(currentState, task))); - - // tasks that correspond to 
non-existent shards are marked - // as successful - batchResultBuilder.successes(partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList())); + ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId); + if (matched == null) { + // tasks that correspond to non-existent shards are marked as successful + logger.debug("{} ignoring shard failed task [{}] (shard does not exist anymore)", task.shardId, task); + batchResultBuilder.success(task); + } else { + // remove duplicate actions as allocation service expects a clean list without duplicates + if (seenShardRoutings.contains(matched)) { + logger.trace("{} ignoring shard failed task [{}] (already scheduled to fail {})", task.shardId, task, matched); + tasksToBeApplied.add(task); + } else { + logger.debug("{} failing shard {} (shard failed task: [{}])", task.shardId, matched, task); + tasksToBeApplied.add(task); + shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(matched, task.message, task.failure)); + seenShardRoutings.add(matched); + } + } + } + } + assert tasksToBeApplied.size() >= shardRoutingsToBeApplied.size(); ClusterState maybeUpdatedState = currentState; - List tasksToFail = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList()); try { - List failedShards = - tasksToFail - .stream() - .map(task -> new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)) - .collect(Collectors.toList()); - RoutingAllocation.Result result = applyFailedShards(currentState, failedShards); + RoutingAllocation.Result result = applyFailedShards(currentState, shardRoutingsToBeApplied); if (result.changed()) { maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); } - batchResultBuilder.successes(tasksToFail); + batchResultBuilder.successes(tasksToBeApplied); } catch (Exception e) { + logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied); // failures 
are communicated back to the requester // cluster state will not be updated in this case - batchResultBuilder.failures(tasksToFail, e); + batchResultBuilder.failures(tasksToBeApplied, e); } - partition - .getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList()) - .forEach(task -> batchResultBuilder.failure( - task, - new NoLongerPrimaryShardException( - task.getShardRouting().shardId(), - "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation") - )); - return batchResultBuilder.build(maybeUpdatedState); } @@ -287,36 +315,6 @@ public class ShardStateAction extends AbstractComponent { return allocationService.applyFailedShards(currentState, failedShards); } - private enum ValidationResult { - VALID, - SOURCE_INVALID, - SHARD_MISSING - } - - private ValidationResult validateTask(ClusterState currentState, ShardRoutingEntry task) { - - // non-local requests - if (!task.shardRouting.isSameAllocation(task.sourceShardRouting)) { - IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(task.shardRouting.shardId()); - if (indexShard == null) { - return ValidationResult.SOURCE_INVALID; - } - ShardRouting primaryShard = indexShard.primaryShard(); - if (primaryShard == null || !primaryShard.isSameAllocation(task.sourceShardRouting)) { - return ValidationResult.SOURCE_INVALID; - } - } - - RoutingNode routingNode = currentState.getRoutingNodes().node(task.getShardRouting().currentNodeId()); - if (routingNode != null) { - ShardRouting maybe = routingNode.getByShardId(task.getShardRouting().shardId()); - if (maybe != null && maybe.isSameAllocation(task.getShardRouting())) { - return ValidationResult.VALID; - } - } - return ValidationResult.SHARD_MISSING; - } - @Override public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { int numberOfUnassignedShards = clusterChangedEvent.state().getRoutingNodes().unassigned().size(); @@ -332,11 +330,11 @@ public class 
ShardStateAction extends AbstractComponent { public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) { ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); - ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, shardRouting, message, null); - sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener); + ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, null); + sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardEntry, listener); } - private static class ShardStartedTransportHandler implements TransportRequestHandler { + private static class ShardStartedTransportHandler implements TransportRequestHandler { private final ClusterService clusterService; private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; private final ESLogger logger; @@ -348,8 +346,8 @@ public class ShardStateAction extends AbstractComponent { } @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - logger.debug("{} received shard started for [{}]", request.shardRouting.shardId(), request); + public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception { + logger.debug("{} received shard started for [{}]", request.shardId, request); clusterService.submitStateUpdateTask( "shard-started", request, @@ -360,7 +358,7 @@ public class ShardStateAction extends AbstractComponent { } } - private static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { + public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { private final AllocationService allocationService; private final ESLogger logger; @@ -370,17 +368,45 @@ public class ShardStateAction extends 
AbstractComponent { } @Override - public String describeTasks(List tasks) { - return tasks.stream().map(entry -> entry.getShardRouting().toString()).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); - } - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - BatchResult.Builder builder = BatchResult.builder(); + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + BatchResult.Builder builder = BatchResult.builder(); + List tasksToBeApplied = new ArrayList<>(); List shardRoutingsToBeApplied = new ArrayList<>(tasks.size()); - for (ShardRoutingEntry task : tasks) { - shardRoutingsToBeApplied.add(task.shardRouting); + Set seenShardRoutings = new HashSet<>(); // to prevent duplicates + for (ShardEntry task : tasks) { + assert task.primaryTerm == 0L : "shard is only started by itself: " + task; + + ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId); + if (matched == null) { + // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started + // events on every cluster state publishing that does not contain the shard as started yet. This means that old stale + // requests might still be in flight even after the shard has already been started or failed on the master. We just + // ignore these requests for now. + logger.debug("{} ignoring shard started task [{}] (shard does not exist anymore)", task.shardId, task); + builder.success(task); + } else { + if (matched.initializing() == false) { + assert matched.active() : "expected active shard routing for task " + task + " but found " + matched; + // same as above, this might have been a stale in-flight request, so we just ignore. 
+ logger.debug("{} ignoring shard started task [{}] (shard exists but is not initializing: {})", task.shardId, task, + matched); + builder.success(task); + } else { + // remove duplicate actions as allocation service expects a clean list without duplicates + if (seenShardRoutings.contains(matched)) { + logger.trace("{} ignoring shard started task [{}] (already scheduled to start {})", task.shardId, task, matched); + tasksToBeApplied.add(task); + } else { + logger.debug("{} starting shard {} (shard started task: [{}])", task.shardId, matched, task); + tasksToBeApplied.add(task); + shardRoutingsToBeApplied.add(matched); + seenShardRoutings.add(matched); + } + } + } } + assert tasksToBeApplied.size() >= shardRoutingsToBeApplied.size(); + ClusterState maybeUpdatedState = currentState; try { RoutingAllocation.Result result = @@ -388,9 +414,10 @@ public class ShardStateAction extends AbstractComponent { if (result.changed()) { maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); } - builder.successes(tasks); + builder.successes(tasksToBeApplied); } catch (Exception e) { - builder.failures(tasks, e); + logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied); + builder.failures(tasksToBeApplied, e); } return builder.build(maybeUpdatedState); @@ -402,31 +429,38 @@ public class ShardStateAction extends AbstractComponent { } } - public static class ShardRoutingEntry extends TransportRequest { - ShardRouting shardRouting; - ShardRouting sourceShardRouting; + public static class ShardEntry extends TransportRequest { + ShardId shardId; + String allocationId; + long primaryTerm; String message; Exception failure; - public ShardRoutingEntry() { + public ShardEntry() { } - ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Exception failure) { - this.shardRouting = shardRouting; - this.sourceShardRouting = sourceShardRouting; + public ShardEntry(ShardId shardId, String 
allocationId, long primaryTerm, String message, @Nullable Exception failure) { + this.shardId = shardId; + this.allocationId = allocationId; + this.primaryTerm = primaryTerm; this.message = message; this.failure = failure; } - public ShardRouting getShardRouting() { - return shardRouting; + public ShardId getShardId() { + return shardId; + } + + public String getAllocationId() { + return allocationId; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardRouting = new ShardRouting(in); - sourceShardRouting = new ShardRouting(in); + shardId = ShardId.readShardId(in); + allocationId = in.readString(); + primaryTerm = in.readVLong(); message = in.readString(); failure = in.readException(); } @@ -434,8 +468,9 @@ public class ShardStateAction extends AbstractComponent { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - shardRouting.writeTo(out); - sourceShardRouting.writeTo(out); + shardId.writeTo(out); + out.writeString(allocationId); + out.writeVLong(primaryTerm); out.writeString(message); out.writeException(failure); } @@ -443,8 +478,9 @@ public class ShardStateAction extends AbstractComponent { @Override public String toString() { List components = new ArrayList<>(4); - components.add("target shard [" + shardRouting + "]"); - components.add("source shard [" + sourceShardRouting + "]"); + components.add("shard id [" + shardId + "]"); + components.add("allocation id [" + allocationId + "]"); + components.add("primary term [" + primaryTerm + "]"); components.add("message [" + message + "]"); if (failure != null) { components.add("failure [" + ExceptionsHelper.detailedMessage(failure) + "]"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index cddf6f98a54..619959923e9 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -98,7 +98,7 @@ public class IndexShardRoutingTable implements Iterable { } if (shard.relocating()) { // create the target initializing shard routing on the node the shard is relocating to - allInitializingShards.add(shard.buildTargetRelocatingShard()); + allInitializingShards.add(shard.getTargetRelocatingShard()); } if (shard.assignedToNode()) { assignedShards.add(shard); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 54f7cf2bb76..f453f3c35ca 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -108,7 +108,7 @@ public class RoutingNodes implements Iterable { k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order // add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from. - ShardRouting targetShardRouting = shard.buildTargetRelocatingShard(); + ShardRouting targetShardRouting = shard.getTargetRelocatingShard(); addInitialRecovery(targetShardRouting, indexShard.primary); previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting); if (previousValue != null) { @@ -276,6 +276,20 @@ public class RoutingNodes implements Iterable { return replicaSet == null ? 
EMPTY : Collections.unmodifiableList(replicaSet); } + @Nullable + public ShardRouting getByAllocationId(ShardId shardId, String allocationId) { + final List replicaSet = assignedShards.get(shardId); + if (replicaSet == null) { + return null; + } + for (ShardRouting shardRouting : replicaSet) { + if (shardRouting.allocationId().getId().equals(allocationId)) { + return shardRouting; + } + } + return null; + } + /** * Returns the active primary shard for the given shard id or null if * no primary is found or the primary is not active. @@ -406,7 +420,7 @@ public class RoutingNodes implements Iterable { ensureMutable(); relocatingShards++; ShardRouting source = shard.relocate(nodeId, expectedShardSize); - ShardRouting target = source.buildTargetRelocatingShard(); + ShardRouting target = source.getTargetRelocatingShard(); updateAssigned(shard, source); node(target.currentNodeId()).add(target); assignedShardsAdd(target); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index f43517ec559..6b7651b5bfc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -145,6 +146,26 @@ public class RoutingTable implements Iterable, Diffable, DiffableemptyList())); @@ -278,7 +299,7 @@ public class RoutingTable implements Iterable, Diffable asList; private final long expectedShardSize; + @Nullable + private final ShardRouting targetRelocatingShard; /** * 
A constructor to internally create shard routing instances, note, the internal flag should only be set to true @@ -74,11 +76,22 @@ public final class ShardRouting implements Writeable, ToXContent { this.unassignedInfo = unassignedInfo; this.allocationId = allocationId; this.expectedShardSize = expectedShardSize; + this.targetRelocatingShard = initializeTargetRelocatingShard(); assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; } + @Nullable + private ShardRouting initializeTargetRelocatingShard() { + if (state == ShardRoutingState.RELOCATING) { + return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary, + ShardRoutingState.INITIALIZING, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize); + } else { + return null; + } + } + /** * Creates a new unassigned shard. */ @@ -177,14 +190,13 @@ public final class ShardRouting implements Writeable, ToXContent { } /** - * Creates a shard routing representing the target shard. + * Returns a shard routing representing the target shard. * The target shard routing will be the INITIALIZING state and have relocatingNodeId set to the * source node. 
*/ - public ShardRouting buildTargetRelocatingShard() { + public ShardRouting getTargetRelocatingShard() { assert relocating(); - return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo, - AllocationId.newTargetRelocation(allocationId), expectedShardSize); + return targetRelocatingShard; } /** @@ -282,6 +294,7 @@ public final class ShardRouting implements Writeable, ToXContent { } expectedShardSize = shardSize; asList = Collections.singletonList(this); + targetRelocatingShard = initializeTargetRelocatingShard(); } public ShardRouting(StreamInput in) throws IOException { @@ -453,7 +466,7 @@ public final class ShardRouting implements Writeable, ToXContent { } /** - * Returns true if this shard is a relocation target for another shard (i.e., was created with {@link #buildTargetRelocatingShard()} + * Returns true if this shard is a relocation target for another shard (i.e., was created with {@link #initializeTargetRelocatingShard()} */ public boolean isRelocationTarget() { return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index f984e8b4f1e..f58bc22c63f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.Result; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import 
org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -84,23 +85,25 @@ public class AllocationService extends AbstractComponent { } /** - * Applies the started shards. Note, shards can be called several times within this method. + * Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be + * provided as parameter and no duplicates should be contained. *

* If the same instance of the routing table is returned, then no change has been made.

*/ - public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards) { + public Result applyStartedShards(ClusterState clusterState, List startedShards) { return applyStartedShards(clusterState, startedShards, true); } - public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, boolean withReroute) { + public Result applyStartedShards(ClusterState clusterState, List startedShards, boolean withReroute) { + if (startedShards.isEmpty()) { + return new Result(false, clusterState.routingTable(), clusterState.metaData()); + } RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); - StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo(), currentNanoTime()); - boolean changed = applyStartedShards(allocation, startedShards); - if (!changed) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); - } + StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, + clusterInfoService.getClusterInfo(), currentNanoTime()); + applyStartedShards(allocation, startedShards); gatewayAllocator.applyStartedShards(allocation); if (withReroute) { reroute(allocation); @@ -109,12 +112,12 @@ public class AllocationService extends AbstractComponent { return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ..."); } - protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) { + protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) { return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations()); } - protected 
RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) { + protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) { MetaData oldMetaData = allocation.metaData(); RoutingTable oldRoutingTable = allocation.routingTable(); RoutingNodes newRoutingNodes = allocation.routingNodes(); @@ -128,7 +131,7 @@ public class AllocationService extends AbstractComponent { metaData(newMetaData).routingTable(newRoutingTable).build()), reason ); - return new RoutingAllocation.Result(true, newRoutingTable, newMetaData, explanations); + return new Result(true, newRoutingTable, newMetaData, explanations); } /** @@ -186,7 +189,7 @@ public class AllocationService extends AbstractComponent { // we do not use newPrimary.isTargetRelocationOf(oldPrimary) because that one enforces newPrimary to // be initializing. However, when the target shard is activated, we still want the primary term to staty // the same - (oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.buildTargetRelocatingShard()))) { + (oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.getTargetRelocatingShard()))) { // do nothing } else { // incrementing the primary term @@ -210,37 +213,44 @@ public class AllocationService extends AbstractComponent { } } - public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) { + public Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) { return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); } /** - * Applies the failed shards. Note, shards can be called several times within this method. + * Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be + * provided as parameter and no duplicates should be contained. + * *

* If the same instance of the routing table is returned, then no change has been made.

*/ - public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List failedShards) { + public Result applyFailedShards(ClusterState clusterState, List failedShards) { + if (failedShards.isEmpty()) { + return new Result(false, clusterState.routingTable(), clusterState.metaData()); + } RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); long currentNanoTime = currentNanoTime(); - FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo(), currentNanoTime); - boolean changed = false; - // as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list + FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, + clusterInfoService.getClusterInfo(), currentNanoTime); + + // as failing primaries also fail associated replicas, we fail replicas first here to avoid re-resolving replica ShardRouting List orderedFailedShards = new ArrayList<>(failedShards); - orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary())); - for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) { - UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo(); - final int failedAllocations = unassignedInfo != null ? 
unassignedInfo.getNumFailedAllocations() : 0; - changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure, - failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT)); - } - if (!changed) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); + orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.routingEntry.primary())); + + for (FailedRerouteAllocation.FailedShard failedShardEntry : orderedFailedShards) { + ShardRouting failedShard = failedShardEntry.routingEntry; + final int failedAllocations = failedShard.unassignedInfo() != null ? failedShard.unassignedInfo().getNumFailedAllocations() : 0; + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShardEntry.message, + failedShardEntry.failure, failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false, + AllocationStatus.NO_ATTEMPT); + allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId()); + applyFailedShard(allocation, failedShard, unassignedInfo); } gatewayAllocator.applyFailedShards(allocation); reroute(allocation); - String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString()); + String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString()); return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ..."); } @@ -259,9 +269,9 @@ public class AllocationService extends AbstractComponent { metaData.getIndexSafe(shardRouting.index()).getSettings()); if (newComputedLeftDelayNanos == 0) { changed = true; - unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(), - 
unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false, - unassignedInfo.getLastAllocationStatus())); + unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), + unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus())); } } } @@ -285,7 +295,7 @@ public class AllocationService extends AbstractComponent { .collect(Collectors.joining(", ")); } - public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) { + public Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) { RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // we don't shuffle the unassigned shards here, to try and get as close as possible to // a consistent result of the effect the commands have on the routing @@ -311,7 +321,7 @@ public class AllocationService extends AbstractComponent { *

* If the same instance of the routing table is returned, then no change has been made. */ - public RoutingAllocation.Result reroute(ClusterState clusterState, String reason) { + public Result reroute(ClusterState clusterState, String reason) { return reroute(clusterState, reason, false); } @@ -320,7 +330,7 @@ public class AllocationService extends AbstractComponent { *

* If the same instance of the routing table is returned, then no change has been made. */ - protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) { + protected Result reroute(ClusterState clusterState, String reason, boolean debug) { RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); @@ -328,7 +338,7 @@ public class AllocationService extends AbstractComponent { clusterInfoService.getClusterInfo(), currentNanoTime(), false); allocation.debugDecision(debug); if (!reroute(allocation)) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); + return new Result(false, clusterState.routingTable(), clusterState.metaData()); } return buildResultAndLogHealthChange(allocation, reason); } @@ -420,7 +430,7 @@ public class AllocationService extends AbstractComponent { boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0; UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT); - applyFailedShard(allocation, shardRouting, false, unassignedInfo); + applyFailedShard(allocation, shardRouting, unassignedInfo); } // its a dead node, remove it, note, its important to remove it *after* we apply failed shard // since it relies on the fact that the RoutingNode exists in the list of nodes @@ -429,111 +439,70 @@ public class AllocationService extends AbstractComponent { return changed; } - private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, ShardRouting primary) { + private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, ShardRouting failedPrimary) { + assert failedPrimary.primary() : "can only 
fail replicas for primary shard: " + failedPrimary; List replicas = new ArrayList<>(); - for (ShardRouting routing : allocation.routingNodes().assignedShards(primary.shardId())) { + for (ShardRouting routing : allocation.routingNodes().assignedShards(failedPrimary.shardId())) { if (!routing.primary() && routing.initializing()) { replicas.add(routing); } } - boolean changed = false; - for (ShardRouting routing : replicas) { - changed |= applyFailedShard(allocation, routing, false, - new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing", - null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, - AllocationStatus.NO_ATTEMPT)); + for (ShardRouting failedReplica : replicas) { + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, + "primary failed while replica initializing", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, + AllocationStatus.NO_ATTEMPT); + applyFailedShard(allocation, failedReplica, unassignedInfo); } - return changed; + return replicas.isEmpty() == false; } - private boolean applyStartedShards(RoutingAllocation routingAllocation, Iterable startedShardEntries) { - boolean dirty = false; - // apply shards might be called several times with the same shard, ignore it + private void applyStartedShards(RoutingAllocation routingAllocation, List startedShardEntries) { + assert startedShardEntries.isEmpty() == false : "non-empty list of started shard entries expected"; RoutingNodes routingNodes = routingAllocation.routingNodes(); for (ShardRouting startedShard : startedShardEntries) { - assert startedShard.initializing(); + assert startedShard.initializing() : "only initializing shards can be started"; + assert routingAllocation.metaData().index(startedShard.shardId().getIndex()) != null : + "shard started for unknown index (shard entry: " + startedShard + ")"; + assert startedShard == 
routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId()) : + "shard routing to start does not exist in routing table, expected: " + startedShard + " but was: " + + routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId()); - // validate index still exists. strictly speaking this is not needed but it gives clearer logs - if (routingAllocation.metaData().index(startedShard.index()) == null) { - logger.debug("{} ignoring shard started, unknown index (routing: {})", startedShard.shardId(), startedShard); - continue; - } + routingNodes.started(startedShard); + logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard); - RoutingNode currentRoutingNode = routingNodes.node(startedShard.currentNodeId()); - if (currentRoutingNode == null) { - logger.debug("{} failed to find shard in order to start it [failed to find node], ignoring (routing: {})", startedShard.shardId(), startedShard); - continue; - } - - ShardRouting matchingShard = currentRoutingNode.getByShardId(startedShard.shardId()); - if (matchingShard == null) { - logger.debug("{} failed to find shard in order to start it [failed to find shard], ignoring (routing: {})", startedShard.shardId(), startedShard); - } else if (matchingShard.isSameAllocation(startedShard) == false) { - logger.debug("{} failed to find shard with matching allocation id in order to start it [failed to find matching shard], ignoring (routing: {}, matched shard routing: {})", startedShard.shardId(), startedShard, matchingShard); - } else { - startedShard = matchingShard; - if (startedShard.active()) { - logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard); - } else { - assert startedShard.initializing(); - dirty = true; - routingNodes.started(startedShard); - logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard); - - if 
(startedShard.relocatingNodeId() != null) { - // relocation target has been started, remove relocation source - RoutingNode relocationSourceNode = routingNodes.node(startedShard.relocatingNodeId()); - ShardRouting relocationSourceShard = relocationSourceNode.getByShardId(startedShard.shardId()); - assert relocationSourceShard.isRelocationSourceOf(startedShard); - routingNodes.remove(relocationSourceShard); - } - } + if (startedShard.relocatingNodeId() != null) { + // relocation target has been started, remove relocation source + RoutingNode relocationSourceNode = routingNodes.node(startedShard.relocatingNodeId()); + ShardRouting relocationSourceShard = relocationSourceNode.getByShardId(startedShard.shardId()); + assert relocationSourceShard.isRelocationSourceOf(startedShard); + assert relocationSourceShard.getTargetRelocatingShard() == startedShard : "relocation target mismatch, expected: " + + startedShard + " but was: " + relocationSourceShard.getTargetRelocatingShard(); + routingNodes.remove(relocationSourceShard); } } - return dirty; } /** - * Applies the relevant logic to handle a failed shard. Returns true if changes happened that - * require relocation. + * Applies the relevant logic to handle a failed shard. 
*/ - private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList, UnassignedInfo unassignedInfo) { - IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index()); - if (indexRoutingTable == null) { - logger.debug("{} ignoring shard failure, unknown index in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); - return false; - } + private void applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, UnassignedInfo unassignedInfo) { RoutingNodes routingNodes = allocation.routingNodes(); + assert failedShard.assignedToNode() : "only assigned shards can be failed"; + assert allocation.metaData().index(failedShard.shardId().getIndex()) != null : + "shard failed for unknown index (shard entry: " + failedShard + ")"; + assert routingNodes.getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()) == failedShard : + "shard routing to fail does not exist in routing table, expected: " + failedShard + " but was: " + + routingNodes.getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()); - RoutingNode matchedNode = routingNodes.node(failedShard.currentNodeId()); - if (matchedNode == null) { - logger.debug("{} ignoring shard failure, unknown node in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); - return false; - } - - ShardRouting matchedShard = matchedNode.getByShardId(failedShard.shardId()); - if (matchedShard != null && matchedShard.isSameAllocation(failedShard)) { - logger.debug("{} failed shard {} found in routingNodes, failing it ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); - // replace incoming instance to make sure we work on the latest one - failedShard = matchedShard; - } else { - logger.debug("{} ignoring shard failure, unknown allocation id in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); - return false; - } - + 
logger.debug("{} failing shard {} with unassigned info ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary()); if (failedShard.primary()) { // fail replicas first otherwise we move RoutingNodes into an inconsistent state failReplicasForUnassignedPrimary(allocation, failedShard); } - if (addToIgnoreList) { - // make sure we ignore this shard on the relevant node - allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId()); - } - cancelShard(logger, failedShard, unassignedInfo, routingNodes); - assert matchedNode.getByShardId(failedShard.shardId()) == null : "failedShard " + failedShard + " was matched but wasn't removed"; - return true; + assert routingNodes.node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == null : "failedShard " + failedShard + + " was matched but wasn't removed"; } public static void cancelShard(ESLogger logger, ShardRouting cancelledShard, UnassignedInfo unassignedInfo, RoutingNodes routingNodes) { @@ -544,11 +513,13 @@ public class AllocationService extends AbstractComponent { // The shard is a target of a relocating shard. In that case we only // need to remove the target shard and cancel the source relocation. // No shard is left unassigned - logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", cancelledShard, unassignedInfo.shortSummary()); + logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", cancelledShard, + unassignedInfo.shortSummary()); RoutingNode sourceNode = routingNodes.node(cancelledShard.relocatingNodeId()); ShardRouting sourceShard = sourceNode.getByShardId(cancelledShard.shardId()); assert sourceShard.isRelocationSourceOf(cancelledShard); - logger.trace("{}, resolved source to [{}]. canceling relocation ... ({})", cancelledShard.shardId(), sourceShard, unassignedInfo.shortSummary()); + logger.trace("{}, resolved source to [{}]. canceling relocation ... 
({})", cancelledShard.shardId(), sourceShard, + unassignedInfo.shortSummary()); routingNodes.cancelRelocation(sourceShard); routingNodes.remove(cancelledShard); } else { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java index c3a397a785b..154acb43bb8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.index.shard.ShardId; import java.util.List; @@ -39,25 +40,28 @@ public class FailedRerouteAllocation extends RoutingAllocation { * details on why it failed. 
*/ public static class FailedShard { - public final ShardRouting shard; + public final ShardRouting routingEntry; public final String message; public final Exception failure; - public FailedShard(ShardRouting shard, String message, Exception failure) { - this.shard = shard; + public FailedShard(ShardRouting routingEntry, String message, Exception failure) { + assert routingEntry.assignedToNode() : "only assigned shards can be failed " + routingEntry; + this.routingEntry = routingEntry; this.message = message; this.failure = failure; } @Override public String toString() { - return "failed shard, shard " + shard + ", message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]"; + return "failed shard, shard " + routingEntry + ", message [" + message + "], failure [" + + ExceptionsHelper.detailedMessage(failure) + "]"; } } private final List failedShards; - public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List failedShards, ClusterInfo clusterInfo, long currentNanoTime) { + public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, + List failedShards, ClusterInfo clusterInfo, long currentNanoTime) { super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false); this.failedShards = failedShards; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index f58ff54fc14..d26b976e6be 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -150,7 +150,8 @@ public class RoutingAllocation { * @param clusterState cluster state before rerouting * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link 
System#nanoTime()}) */ - public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) { + public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, + long currentNanoTime, boolean retryFailed) { this.deciders = deciders; this.routingNodes = routingNodes; this.metaData = clusterState.metaData(); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java index 4d1ac1408a2..e63ce2b19e9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java @@ -33,9 +33,10 @@ import java.util.List; */ public class StartedRerouteAllocation extends RoutingAllocation { - private final List startedShards; + private final List startedShards; - public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List startedShards, ClusterInfo clusterInfo, long currentNanoTime) { + public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, + List startedShards, ClusterInfo clusterInfo, long currentNanoTime) { super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false); this.startedShards = startedShards; } @@ -44,7 +45,7 @@ public class StartedRerouteAllocation extends RoutingAllocation { * Get started shards * @return list of started shards */ - public List startedShards() { + public List startedShards() { return startedShards; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java 
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index f2ab421ee5e..b880b04f3da 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -188,11 +188,11 @@ public class ThrottlingAllocationDecider extends AllocationDecider { } else if (shardRouting.relocating()) { initializingShard = shardRouting.cancelRelocation() .relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) - .buildTargetRelocatingShard(); + .getTargetRelocatingShard(); } else { assert shardRouting.started(); initializingShard = shardRouting.relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) - .buildTargetRelocatingShard(); + .getTargetRelocatingShard(); } assert initializingShard.initializing(); return initializingShard; diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index f074a3ec09c..ab6f6ae3ed2 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -124,8 +124,8 @@ public class GatewayAllocator extends AbstractComponent { public void applyFailedShards(FailedRerouteAllocation allocation) { for (FailedRerouteAllocation.FailedShard shard : allocation.failedShards()) { - Releasables.close(asyncFetchStarted.remove(shard.shard.shardId())); - Releasables.close(asyncFetchStore.remove(shard.shard.shardId())); + Releasables.close(asyncFetchStarted.remove(shard.routingEntry.shardId())); + Releasables.close(asyncFetchStore.remove(shard.routingEntry.shardId())); } } diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java 
index 6b2997b1a97..fd06175e28c 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -216,7 +216,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? String message = "master " + masterNode + " has not removed previously failed shard. resending shard failure"; logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message); - shardStateAction.shardFailed(matchedRouting, matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER); + shardStateAction.localShardFailed(matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER); } } } @@ -686,7 +686,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple try { logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message); failedShardsCache.put(shardRouting.shardId(), shardRouting); - shardStateAction.shardFailed(shardRouting, shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER); + shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER); } catch (Exception inner) { if (failure != null) inner.addSuppressed(failure); logger.warn( diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 8353f6dbacc..2e440d921ee 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -76,7 +76,7 @@ public class ReplicationOperationTests extends ESTestCase { // simulate execution 
of the replication phase on the relocation target node after relocation source was marked as relocated state = ClusterState.builder(state) .nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build(); - primaryShard = primaryShard.buildTargetRelocatingShard(); + primaryShard = primaryShard.getTargetRelocatingShard(); } final Set expectedReplicas = getExpectedReplicas(shardId, state); @@ -161,7 +161,7 @@ public class ReplicationOperationTests extends ESTestCase { // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated state = ClusterState.builder(state) .nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build(); - primaryShard = primaryShard.buildTargetRelocatingShard(); + primaryShard = primaryShard.getTargetRelocatingShard(); } final Set expectedReplicas = getExpectedReplicas(shardId, state); @@ -175,7 +175,7 @@ public class ReplicationOperationTests extends ESTestCase { final ClusterState finalState = state; final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) { @Override - public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, + public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess, Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { assertThat(replica, equalTo(failedReplica)); @@ -311,7 +311,7 @@ public class ReplicationOperationTests extends ESTestCase { } if (shardRouting.relocating() && localNodeId.equals(shardRouting.relocatingNodeId()) == false) { - expectedReplicas.add(shardRouting.buildTargetRelocatingShard()); + expectedReplicas.add(shardRouting.getTargetRelocatingShard()); } } } @@ -422,7 +422,7 @@ public class ReplicationOperationTests extends ESTestCase { } @Override - public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception 
exception, Runnable onSuccess, + public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess, Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { if (failedReplicas.add(replica) == false) { fail("replica [" + replica + "] was failed twice"); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index bca17fb143b..9d8bf87757e 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -534,15 +534,16 @@ public class TransportReplicationActionTests extends ESTestCase { AtomicReference failure = new AtomicReference<>(); AtomicReference ignoredFailure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); - proxy.failShard(replica, shardRoutings.primaryShard(), "test", new ElasticsearchException("simulated"), + proxy.failShard(replica, randomIntBetween(0, 10), "test", new ElasticsearchException("simulated"), () -> success.set(true), failure::set, ignoredFailure::set ); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); assertEquals(1, shardFailedRequests.length); CapturingTransport.CapturedRequest shardFailedRequest = shardFailedRequests[0]; - ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) shardFailedRequest.request; + ShardStateAction.ShardEntry shardEntry = (ShardStateAction.ShardEntry) shardFailedRequest.request; // the shard the request was sent to and the shard to be failed should be the same - assertEquals(shardRoutingEntry.getShardRouting(), replica); + assertEquals(shardEntry.getShardId(), replica.shardId()); + assertEquals(shardEntry.getAllocationId(), 
replica.allocationId().getId()); if (randomBoolean()) { // simulate success transport.handleResponse(shardFailedRequest.requestId, TransportResponse.Empty.INSTANCE); @@ -553,7 +554,7 @@ public class TransportReplicationActionTests extends ESTestCase { } else if (randomBoolean()) { // simulate the primary has been demoted transport.handleRemoteError(shardFailedRequest.requestId, - new ShardStateAction.NoLongerPrimaryShardException(shardRoutingEntry.getShardRouting().shardId(), + new ShardStateAction.NoLongerPrimaryShardException(replica.shardId(), "shard-failed-test")); assertFalse(success.get()); assertNotNull(failure.get()); diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index d12b6b563b3..31197e0a9a4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardIterator; @@ -51,7 +50,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -79,7 +77,8 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa .build()); numberOfReplicas = randomIntBetween(2, 16); 
metaData = MetaData.builder() - .put(IndexMetaData.builder(INDEX).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(numberOfReplicas)) + .put(IndexMetaData.builder(INDEX).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(numberOfReplicas).primaryTerm(0, randomIntBetween(2, 10))) .build(); routingTable = RoutingTable.builder() .addAsNew(metaData.index(INDEX)) @@ -89,8 +88,8 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa } public void testEmptyTaskListProducesSameClusterState() throws Exception { - List tasks = Collections.emptyList(); - ClusterStateTaskExecutor.BatchResult result = + List tasks = Collections.emptyList(); + ClusterStateTaskExecutor.BatchResult result = executor.execute(clusterState, tasks); assertTasksSuccessful(tasks, result, clusterState, false); } @@ -98,35 +97,35 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa public void testDuplicateFailuresAreOkay() throws Exception { String reason = "test duplicate failures are okay"; ClusterState currentState = createClusterStateWithStartedShards(reason); - List tasks = createExistingShards(currentState, reason); - ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); + List tasks = createExistingShards(currentState, reason); + ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); assertTasksSuccessful(tasks, result, clusterState, true); } public void testNonExistentShardsAreMarkedAsSuccessful() throws Exception { String reason = "test non existent shards are marked as successful"; ClusterState currentState = createClusterStateWithStartedShards(reason); - List tasks = createNonExistentShards(currentState, reason); - ClusterStateTaskExecutor.BatchResult result = executor.execute(clusterState, tasks); + List tasks = createNonExistentShards(currentState, reason); + ClusterStateTaskExecutor.BatchResult result = 
executor.execute(clusterState, tasks); assertTasksSuccessful(tasks, result, clusterState, false); } public void testTriviallySuccessfulTasksBatchedWithFailingTasks() throws Exception { String reason = "test trivially successful tasks batched with failing tasks"; ClusterState currentState = createClusterStateWithStartedShards(reason); - List failingTasks = createExistingShards(currentState, reason); - List nonExistentTasks = createNonExistentShards(currentState, reason); + List failingTasks = createExistingShards(currentState, reason); + List nonExistentTasks = createNonExistentShards(currentState, reason); ShardStateAction.ShardFailedClusterStateTaskExecutor failingExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger) { @Override RoutingAllocation.Result applyFailedShards(ClusterState currentState, List failedShards) { throw new RuntimeException("simulated applyFailedShards failure"); } }; - List tasks = new ArrayList<>(); + List tasks = new ArrayList<>(); tasks.addAll(failingTasks); tasks.addAll(nonExistentTasks); - ClusterStateTaskExecutor.BatchResult result = failingExecutor.execute(currentState, tasks); - Map taskResultMap = + ClusterStateTaskExecutor.BatchResult result = failingExecutor.execute(currentState, tasks); + Map taskResultMap = failingTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.failure(new RuntimeException("simulated applyFailedShards failure")))); taskResultMap.putAll(nonExistentTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success()))); assertTaskResults(taskResultMap, result, currentState, false); @@ -135,16 +134,20 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa public void testIllegalShardFailureRequests() throws Exception { String reason = "test illegal shard failure requests"; ClusterState currentState = 
createClusterStateWithStartedShards(reason); - List failingTasks = createExistingShards(currentState, reason); - List tasks = new ArrayList<>(); - for (ShardStateAction.ShardRoutingEntry failingTask : failingTasks) { - tasks.add(new ShardStateAction.ShardRoutingEntry(failingTask.getShardRouting(), randomInvalidSourceShard(currentState, failingTask.getShardRouting()), failingTask.message, failingTask.failure)); + List failingTasks = createExistingShards(currentState, reason); + List tasks = new ArrayList<>(); + for (ShardStateAction.ShardEntry failingTask : failingTasks) { + long primaryTerm = currentState.metaData().index(failingTask.shardId.getIndex()).primaryTerm(failingTask.shardId.id()); + tasks.add(new ShardStateAction.ShardEntry(failingTask.shardId, failingTask.allocationId, + randomIntBetween(1, (int) primaryTerm - 1), failingTask.message, failingTask.failure)); } - Map taskResultMap = + Map taskResultMap = tasks.stream().collect(Collectors.toMap( Function.identity(), - task -> ClusterStateTaskExecutor.TaskResult.failure(new ShardStateAction.NoLongerPrimaryShardException(task.getShardRouting().shardId(), "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")))); - ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); + task -> ClusterStateTaskExecutor.TaskResult.failure(new ShardStateAction.NoLongerPrimaryShardException(task.shardId, + "primary term [" + task.primaryTerm + "] did not match current primary term [" + + currentState.metaData().index(task.shardId.getIndex()).primaryTerm(task.shardId.id()) + "]")))); + ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); assertTaskResults(taskResultMap, result, currentState, false); } @@ -163,7 +166,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa return ClusterState.builder(stateAfterReroute).routingTable(afterStart).build(); } - private List 
createExistingShards(ClusterState currentState, String reason) { + private List createExistingShards(ClusterState currentState, String reason) { List shards = new ArrayList<>(); GroupShardsIterator shardGroups = currentState.routingTable().allAssignedShardsGrouped(new String[] { INDEX }, true); @@ -182,7 +185,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa return toTasks(currentState, shardsToFail, indexUUID, reason); } - private List createNonExistentShards(ClusterState currentState, String reason) { + private List createNonExistentShards(ClusterState currentState, String reason) { // add shards from a non-existent index String nonExistentIndexUUID = "non-existent"; Index index = new Index("non-existent", nonExistentIndexUUID); @@ -196,17 +199,14 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa nonExistentShards.add(nonExistentShardRouting(index, nodeIds, false)); } - List existingShards = createExistingShards(currentState, reason); - List shardsWithMismatchedAllocationIds = new ArrayList<>(); - for (ShardStateAction.ShardRoutingEntry existingShard : existingShards) { - ShardRouting sr = existingShard.getShardRouting(); - ShardRouting nonExistentShardRouting = - TestShardRouting.newShardRouting(sr.shardId(), sr.currentNodeId(), sr.relocatingNodeId(), sr.restoreSource(), sr.primary(), sr.state()); - shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardRoutingEntry(nonExistentShardRouting, nonExistentShardRouting, existingShard.message, existingShard.failure)); + List existingShards = createExistingShards(currentState, reason); + List shardsWithMismatchedAllocationIds = new ArrayList<>(); + for (ShardStateAction.ShardEntry existingShard : existingShards) { + shardsWithMismatchedAllocationIds.add(new ShardStateAction.ShardEntry(existingShard.shardId, UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure)); } - List tasks = new ArrayList<>(); - 
nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.ShardRoutingEntry(shard, shard, reason, new CorruptIndexException("simulated", nonExistentIndexUUID)))); + List tasks = new ArrayList<>(); + nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.ShardEntry(shard.shardId(), shard.allocationId().getId(), 0L, reason, new CorruptIndexException("simulated", nonExistentIndexUUID)))); tasks.addAll(shardsWithMismatchedAllocationIds); return tasks; } @@ -216,41 +216,42 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa } private static void assertTasksSuccessful( - List tasks, - ClusterStateTaskExecutor.BatchResult result, + List tasks, + ClusterStateTaskExecutor.BatchResult result, ClusterState clusterState, boolean clusterStateChanged ) { - Map taskResultMap = + Map taskResultMap = tasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success())); assertTaskResults(taskResultMap, result, clusterState, clusterStateChanged); } private static void assertTaskResults( - Map taskResultMap, - ClusterStateTaskExecutor.BatchResult result, + Map taskResultMap, + ClusterStateTaskExecutor.BatchResult result, ClusterState clusterState, boolean clusterStateChanged ) { // there should be as many task results as tasks assertEquals(taskResultMap.size(), result.executionResults.size()); - for (Map.Entry entry : taskResultMap.entrySet()) { + for (Map.Entry entry : taskResultMap.entrySet()) { // every task should have a corresponding task result assertTrue(result.executionResults.containsKey(entry.getKey())); // the task results are as expected - assertEquals(entry.getValue().isSuccess(), result.executionResults.get(entry.getKey()).isSuccess()); + assertEquals(entry.getKey().toString(), entry.getValue().isSuccess(), result.executionResults.get(entry.getKey()).isSuccess()); } List shards = clusterState.getRoutingTable().allShards(); - for (Map.Entry entry : taskResultMap.entrySet()) 
{ + for (Map.Entry entry : taskResultMap.entrySet()) { if (entry.getValue().isSuccess()) { - // the shard was successfully failed and so should not - // be in the routing table + // the shard was successfully failed and so should not be in the routing table for (ShardRouting shard : shards) { - if (entry.getKey().getShardRouting().allocationId() != null) { - assertThat(shard.allocationId(), not(equalTo(entry.getKey().getShardRouting().allocationId()))); + if (shard.assignedToNode()) { + assertFalse("entry key " + entry.getKey() + ", shard routing " + shard, + entry.getKey().getShardId().equals(shard.shardId()) && + entry.getKey().getAllocationId().equals(shard.allocationId().getId())); } } } else { @@ -268,50 +269,15 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa } } - private static List toTasks(ClusterState currentState, List shards, String indexUUID, String message) { + private static List toTasks(ClusterState currentState, List shards, String indexUUID, String message) { return shards .stream() - .map(shard -> new ShardStateAction.ShardRoutingEntry(shard, randomValidSourceShard(currentState, shard), message, new CorruptIndexException("simulated", indexUUID))) + .map(shard -> new ShardStateAction.ShardEntry( + shard.shardId(), + shard.allocationId().getId(), + randomBoolean() ? 
0L : currentState.metaData().getIndexSafe(shard.index()).primaryTerm(shard.id()), + message, + new CorruptIndexException("simulated", indexUUID))) .collect(Collectors.toList()); } - - private static ShardRouting randomValidSourceShard(ClusterState currentState, ShardRouting shardRouting) { - // for the request node ID to be valid, either the request is - // from the node the shard is assigned to, or the request is - // from the node holding the primary shard - if (randomBoolean()) { - // request from local node - return shardRouting; - } else { - // request from primary node unless in the case of - // non-existent shards there is not one and we fallback to - // the local node - ShardRouting primaryNodeId = primaryShard(currentState, shardRouting); - return primaryNodeId != null ? primaryNodeId : shardRouting; - } - } - - private static ShardRouting randomInvalidSourceShard(ClusterState currentState, ShardRouting shardRouting) { - ShardRouting primaryShard = primaryShard(currentState, shardRouting); - Set shards = - currentState - .routingTable() - .allShards() - .stream() - .filter(shard -> !shard.isSameAllocation(shardRouting)) - .filter(shard -> !shard.isSameAllocation(primaryShard)) - .collect(Collectors.toSet()); - if (!shards.isEmpty()) { - return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0); - } else { - return - TestShardRouting.newShardRouting(shardRouting.shardId(), UUIDs.randomBase64UUID(random()), randomBoolean(), - randomFrom(ShardRoutingState.values())); - } - } - - private static ShardRouting primaryShard(ClusterState currentState, ShardRouting shardRouting) { - IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(shardRouting.shardId()); - return indexShard == null ? 
null : indexShard.primaryShard(); - } } diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index d387d6f7d43..762e7d9e75a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -29,9 +29,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardsIterator; -import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; @@ -61,6 +59,7 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; public class ShardStateActionTests extends ESTestCase { @@ -89,9 +88,9 @@ public class ShardStateActionTests extends ESTestCase { } @Override - protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) { + protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardEntry shardEntry, Listener listener) { onBeforeWaitForNewMasterAndRetry.run(); - super.waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener); + 
super.waitForNewMasterAndRetry(actionName, observer, shardEntry, listener); onAfterWaitForNewMasterAndRetry.run(); } } @@ -140,7 +139,7 @@ public class ShardStateActionTests extends ESTestCase { CountDownLatch latch = new CountDownLatch(1); ShardRouting shardRouting = getRandomShardRouting(index); - shardStateAction.shardFailed(shardRouting, shardRouting, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.localShardFailed(shardRouting, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -158,10 +157,11 @@ public class ShardStateActionTests extends ESTestCase { CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertEquals(1, capturedRequests.length); // the request is a shard failed request - assertThat(capturedRequests[0].request, is(instanceOf(ShardStateAction.ShardRoutingEntry.class))); - ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) capturedRequests[0].request; + assertThat(capturedRequests[0].request, is(instanceOf(ShardStateAction.ShardEntry.class))); + ShardStateAction.ShardEntry shardEntry = (ShardStateAction.ShardEntry) capturedRequests[0].request; // for the right shard - assertEquals(shardRouting, shardRoutingEntry.getShardRouting()); + assertEquals(shardEntry.shardId, shardRouting.shardId()); + assertEquals(shardEntry.allocationId, shardRouting.allocationId().getId()); // sent to the master assertEquals(clusterService.state().nodes().getMasterNode().getId(), capturedRequests[0].node.getId()); @@ -188,7 +188,7 @@ public class ShardStateActionTests extends ESTestCase { }); ShardRouting failedShard = getRandomShardRouting(index); - shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { 
@Override public void onSuccess() { success.set(true); @@ -237,7 +237,7 @@ public class ShardStateActionTests extends ESTestCase { setUpMasterRetryVerification(numberOfRetries, retries, latch, retryLoop); ShardRouting failedShard = getRandomShardRouting(index); - shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -273,7 +273,7 @@ public class ShardStateActionTests extends ESTestCase { AtomicBoolean failure = new AtomicBoolean(); ShardRouting failedShard = getRandomShardRouting(index); - shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { failure.set(false); @@ -305,7 +305,7 @@ public class ShardStateActionTests extends ESTestCase { ShardRouting failedShard = getRandomShardRouting(index); RoutingTable routingTable = RoutingTable.builder(clusterService.state().getRoutingTable()).remove(index).build(); setState(clusterService, ClusterState.builder(clusterService.state()).routingTable(routingTable)); - shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { success.set(true); @@ -334,13 +334,12 @@ public class ShardStateActionTests extends ESTestCase { ShardRouting failedShard = getRandomShardRouting(index); - String nodeId = randomFrom(clusterService.state().nodes().getNodes().keys().toArray(String.class)); - AtomicReference failure = new AtomicReference<>(); CountDownLatch latch = new CountDownLatch(1); - 
ShardRouting sourceFailedShard = TestShardRouting.newShardRouting(failedShard.shardId(), nodeId, randomBoolean(), randomFrom(ShardRoutingState.values())); - shardStateAction.shardFailed(failedShard, sourceFailedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + long primaryTerm = clusterService.state().metaData().index(index).primaryTerm(failedShard.id()); + assertThat(primaryTerm, greaterThanOrEqualTo(1L)); + shardStateAction.remoteShardFailed(failedShard, primaryTerm + 1, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { failure.set(null); @@ -355,7 +354,7 @@ public class ShardStateActionTests extends ESTestCase { }); ShardStateAction.NoLongerPrimaryShardException catastrophicError = - new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "source shard [" + sourceFailedShard + " is neither the local allocation nor the primary allocation"); + new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "dummy failure"); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); transport.handleRemoteError(capturedRequests[0].requestId, catastrophicError); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index 2e368f322ee..9daee812193 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -70,7 +69,7 @@ public class 
AllocationIdTests extends ESTestCase { assertThat(shard.allocationId().getId(), equalTo(allocationId.getId())); assertThat(shard.allocationId().getRelocationId(), notNullValue()); - ShardRouting target = shard.buildTargetRelocatingShard(); + ShardRouting target = shard.getTargetRelocatingShard(); assertThat(target.allocationId().getId(), equalTo(shard.allocationId().getRelocationId())); assertThat(target.allocationId().getRelocationId(), equalTo(shard.allocationId().getId())); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index fa9133f6d36..a689acd04a5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.UUIDs; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.test.ESTestCase; @@ -86,7 +85,7 @@ public class ShardRoutingTests extends ESTestCase { assertFalse(startedShard1.isRelocationTarget()); ShardRouting sourceShard0a = startedShard0.relocate("node2", -1); assertFalse(sourceShard0a.isRelocationTarget()); - ShardRouting targetShard0a = sourceShard0a.buildTargetRelocatingShard(); + ShardRouting targetShard0a = sourceShard0a.getTargetRelocatingShard(); assertTrue(targetShard0a.isRelocationTarget()); ShardRouting sourceShard0b = startedShard0.relocate("node2", -1); ShardRouting sourceShard1 = startedShard1.relocate("node2", -1); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 5f4d6b5a8ba..0b9e20b3578 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -37,6 +37,8 @@ import org.elasticsearch.test.ESAllocationTestCase; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; +import java.util.Set; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -218,9 +220,6 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2"))); assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1)); assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED)); - - logger.info("fail the shard again, check that nothing happens"); - assertThat(strategy.applyFailedShard(clusterState, shardToFail).changed(), equalTo(false)); } public void testFirstAllocationFailureSingleNode() { @@ -274,9 +273,6 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1)); assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED)); } - - logger.info("fail the shard again, see that nothing happens"); - assertThat(strategy.applyFailedShard(clusterState, firstShard).changed(), equalTo(false)); } public void testSingleShardMultipleAllocationFailures() { @@ -317,11 +313,17 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { int shardsToFail = randomIntBetween(1, numberOfReplicas); ArrayList failedShards = new ArrayList<>(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); + Set failedNodes = new HashSet<>(); + Set shardRoutingsToFail = new HashSet<>(); for (int i = 0; i < 
shardsToFail; i++) { - String n = "node" + Integer.toString(randomInt(numberOfReplicas)); - logger.info("failing shard on node [{}]", n); - ShardRouting shardToFail = routingNodes.node(n).iterator().next(); - failedShards.add(new FailedRerouteAllocation.FailedShard(shardToFail, null, null)); + String failedNode = "node" + Integer.toString(randomInt(numberOfReplicas)); + logger.info("failing shard on node [{}]", failedNode); + ShardRouting shardToFail = routingNodes.node(failedNode).iterator().next(); + if (shardRoutingsToFail.contains(shardToFail) == false) { + failedShards.add(new FailedRerouteAllocation.FailedShard(shardToFail, null, null)); + failedNodes.add(failedNode); + shardRoutingsToFail.add(shardToFail); + } } routingTable = strategy.applyFailedShards(clusterState, failedShards).routingTable(); @@ -329,8 +331,14 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.getRoutingNodes(); for (FailedRerouteAllocation.FailedShard failedShard : failedShards) { - if (!routingNodes.node(failedShard.shard.currentNodeId()).isEmpty()) { - fail("shard " + failedShard + " was re-assigned to it's node"); + if (routingNodes.getByAllocationId(failedShard.routingEntry.shardId(), failedShard.routingEntry.allocationId().getId()) != null) { + fail("shard " + failedShard + " was not failed"); + } + } + + for (String failedNode : failedNodes) { + if (!routingNodes.node(failedNode).isEmpty()) { + fail("shard was re-assigned to failed node " + failedNode); } } } @@ -390,9 +398,6 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1)); assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED)); } - - logger.info("fail the shard again, see that nothing happens"); - assertThat(strategy.applyFailedShard(clusterState, 
firstShard).changed(), equalTo(false)); } public void testRebalanceFailure() { @@ -530,10 +535,6 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); - - // simulate another failure coming in, with the "old" shard routing, verify that nothing changes, and we ignore it - routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail); - assertThat(routingResult.changed(), equalTo(false)); } public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToElect() { @@ -575,9 +576,5 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard(); assertThat(newPrimaryShard, not(equalTo(primaryShardToFail))); - - // simulate another failure coming in, with the "old" shard routing, verify that nothing changes, and we ignore it - routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail); - assertThat(routingResult.changed(), equalTo(false)); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 16a11a9c150..4e32cd1e2cc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; @@ -48,7 +47,7 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> building initial cluster state"); final IndexMetaData indexMetaData = IndexMetaData.builder("test") .settings(settings(Version.CURRENT)) - .numberOfShards(3).numberOfReplicas(0) + .numberOfShards(2).numberOfReplicas(0) .build(); final Index index = indexMetaData.getIndex(); ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) @@ -56,69 +55,27 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { .metaData(MetaData.builder().put(indexMetaData, false)); final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.INITIALIZING); - final ShardRouting startedShard = TestShardRouting.newShardRouting(new ShardId(index, 1), "node2", true, ShardRoutingState.STARTED); - final ShardRouting relocatingShard = TestShardRouting.newShardRouting(new ShardId(index, 2), "node1", "node2", true, ShardRoutingState.RELOCATING); + final ShardRouting relocatingShard = TestShardRouting.newShardRouting(new ShardId(index, 1), "node1", "node2", true, ShardRoutingState.RELOCATING); stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index) .addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build()) - .addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).build()) .addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build()); ClusterState state = 
stateBuilder.build(); logger.info("--> test starting of shard"); - RoutingAllocation.Result result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(initShard.shardId(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(), - ShardRoutingState.INITIALIZING, initShard.allocationId())), false); + RoutingAllocation.Result result = allocation.applyStartedShards(state, Arrays.asList(initShard), false); assertTrue("failed to start " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); assertTrue(initShard + "isn't started \ncurrent routing table:" + result.routingTable().prettyPrint(), result.routingTable().index("test").shard(initShard.id()).allShardsStarted()); - logger.info("--> testing shard variants that shouldn't match the initializing shard"); - - result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(initShard.shardId(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(), - ShardRoutingState.INITIALIZING)), false); - assertFalse("wrong allocation id flag shouldn't start shard " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); - - result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(initShard.shardId(), "some_node", initShard.currentNodeId(), initShard.primary(), - ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())))), false); - assertFalse("relocating shard from node shouldn't start shard " + initShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); - - - - logger.info("--> testing double starting"); - - result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(startedShard.shardId(), startedShard.currentNodeId(), startedShard.relocatingNodeId(), 
startedShard.primary(), - ShardRoutingState.INITIALIZING, startedShard.allocationId())), false); - assertFalse("duplicate starting of the same shard should be ignored \ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); - logger.info("--> testing starting of relocating shards"); - final AllocationId targetAllocationId = AllocationId.newTargetRelocation(relocatingShard.allocationId()); - result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(relocatingShard.shardId(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), - ShardRoutingState.INITIALIZING, targetAllocationId)), false); - + result = allocation.applyStartedShards(state, Arrays.asList(relocatingShard.getTargetRelocatingShard()), false); assertTrue("failed to start " + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); ShardRouting shardRouting = result.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0); assertThat(shardRouting.state(), equalTo(ShardRoutingState.STARTED)); assertThat(shardRouting.currentNodeId(), equalTo("node2")); assertThat(shardRouting.relocatingNodeId(), nullValue()); - - logger.info("--> testing shard variants that shouldn't match the initializing relocating shard"); - - result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(relocatingShard.shardId(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), - ShardRoutingState.INITIALIZING))); - assertFalse("wrong allocation id shouldn't start shard" + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); - - result = allocation.applyStartedShards(state, Arrays.asList( - TestShardRouting.newShardRouting(relocatingShard.shardId(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), 
relocatingShard.primary(), - ShardRoutingState.INITIALIZING, relocatingShard.allocationId())), false); - assertFalse("wrong allocation id shouldn't start shard even if relocatingId==shard.id" + relocatingShard + "\ncurrent routing table:" + result.routingTable().prettyPrint(), result.changed()); - } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 56ca6381af9..7ede869f0a6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -265,7 +265,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { assertEquals(10L, DiskThresholdDecider.getExpectedShardSize(test_0, allocation, 0)); RoutingNode node = new RoutingNode("node1", new DiscoveryNode("node1", new LocalTransportAddress("test"), - emptyMap(), emptySet(), Version.CURRENT), test_0, test_1.buildTargetRelocatingShard(), test_2); + emptyMap(), emptySet(), Version.CURRENT), test_0, test_1.getTargetRelocatingShard(), test_2); assertEquals(100L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, false, "/dev/null")); assertEquals(90L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/null")); assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/some/other/dev")); @@ -283,7 +283,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { other_0 = ShardRoutingHelper.relocate(other_0, "node1"); node = new RoutingNode("node1", new DiscoveryNode("node1", new LocalTransportAddress("test"), - emptyMap(), emptySet(), Version.CURRENT), test_0, test_1.buildTargetRelocatingShard(), test_2, other_0.buildTargetRelocatingShard()); + emptyMap(), emptySet(), 
Version.CURRENT), test_0, test_1.getTargetRelocatingShard(), test_2, other_0.getTargetRelocatingShard()); if (other_0.primary()) { assertEquals(10100L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, false, "/dev/null")); assertEquals(10090L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/null")); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 466d3b4f83d..4ad3abc59b4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -951,7 +951,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { NetworkPartition networkPartition = addRandomIsolation(isolatedNode); networkPartition.startDisrupting(); - service.shardFailed(failedShard, failedShard, "simulated", new CorruptIndexException("simulated", (String) null), new + service.localShardFailed(failedShard, "simulated", new CorruptIndexException("simulated", (String) null), new ShardStateAction.Listener() { @Override public void onSuccess() { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index cd2b4eaf2e4..2a0410f272b 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -563,7 +563,7 @@ public class NodeJoinControllerTests extends ESTestCase { } @Override - public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, + public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, boolean withReroute) { return new RoutingAllocation.Result(false, 
clusterState.routingTable(), clusterState.metaData()); } diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 27eb753dfe5..729bc549af4 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -477,7 +477,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { } @Override - public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, Runnable onSuccess, + public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess, Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { throw new UnsupportedOperationException(); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 1a8caaa3514..152be45d558 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.cluster; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; @@ -41,6 +42,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.action.shard.ShardStateAction; import 
org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -53,12 +55,12 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RandomAllocationDeciderTests; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -78,6 +80,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; @@ -91,10 +94,11 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class ClusterStateChanges { +public class ClusterStateChanges extends AbstractComponent { private final ClusterService clusterService; - private final AllocationService allocationService; + private final ShardStateAction.ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; + private final 
ShardStateAction.ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; // transport actions private final TransportCloseIndexAction transportCloseIndexAction; @@ -105,14 +109,16 @@ public class ClusterStateChanges { private final TransportCreateIndexAction transportCreateIndexAction; public ClusterStateChanges() { - Settings settings = Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build(); + super(Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build()); - allocationService = new AllocationService(settings, new AllocationDeciders(settings, + final AllocationService allocationService = new AllocationService(settings, new AllocationDeciders(settings, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings), new ReplicaAfterPrimaryActiveAllocationDecider(settings), new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))), NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); + shardFailedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger); + shardStartedClusterStateTaskExecutor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); @@ -199,13 +205,26 @@ public class ClusterStateChanges { } public ClusterState applyFailedShards(ClusterState clusterState, List failedShards) { - RoutingAllocation.Result rerouteResult = allocationService.applyFailedShards(clusterState, failedShards); - return ClusterState.builder(clusterState).routingResult(rerouteResult).build(); + List entries = failedShards.stream().map(failedShard -> + new 
ShardStateAction.ShardEntry(failedShard.routingEntry.shardId(), failedShard.routingEntry.allocationId().getId(), + 0L, failedShard.message, failedShard.failure)) + .collect(Collectors.toList()); + try { + return shardFailedClusterStateTaskExecutor.execute(clusterState, entries).resultingState; + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } } public ClusterState applyStartedShards(ClusterState clusterState, List startedShards) { - RoutingAllocation.Result rerouteResult = allocationService.applyStartedShards(clusterState, startedShards); - return ClusterState.builder(clusterState).routingResult(rerouteResult).build(); + List entries = startedShards.stream().map(startedShard -> + new ShardStateAction.ShardEntry(startedShard.shardId(), startedShard.allocationId().getId(), 0L, "shard started", null)) + .collect(Collectors.toList()); + try { + return shardStartedClusterStateTaskExecutor.execute(clusterState, entries).resultingState; + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } } private , Response extends ActionResponse> ClusterState execute( From 8bbc312fdd2442ce5c9ad431ab0c61930069df2f Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Thu, 4 Aug 2016 08:47:46 -0400 Subject: [PATCH 028/103] Fixes issue with dangling index being deleted instead of re-imported (#19666) Fixes an issue where a node that receives a cluster state update with a brand new cluster UUID but without an initial persistence block could cause indices to be wiped out, preventing them from being reimported as dangling indices. This commit only removes the in-memory data structures, so that the indices are subsequently reimported as dangling indices. 
--- .../cluster/ClusterChangedEvent.java | 12 +- .../elasticsearch/cluster/ClusterState.java | 1 + .../cluster/IndicesClusterStateService.java | 27 ++--- ...ClusterStateServiceRandomUpdatesTests.java | 106 ++++++++++++++++-- 4 files changed, 115 insertions(+), 31 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index efd525d313b..e3164eacdbb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -199,10 +199,14 @@ public class ClusterChangedEvent { return nodesRemoved() || nodesAdded(); } - // Determines whether or not the current cluster state represents an entirely - // different cluster from the previous cluster state, which will happen when a - // master node is elected that has never been part of the cluster before. - private boolean isNewCluster() { + /** + * Determines whether or not the current cluster state represents an entirely + * new cluster, either when a node joins a cluster for the first time or when + * the node receives a cluster state update from a brand new cluster (different + * UUID from the previous cluster), which will happen when a master node is + * elected that has never been part of the cluster before. 
+ */ + public boolean isNewCluster() { final String prevClusterUUID = previousState.metaData().clusterUUID(); final String currClusterUUID = state.metaData().clusterUUID(); return prevClusterUUID.equals(currClusterUUID) == false; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index abad2e9a8e4..6745900057d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -280,6 +280,7 @@ public class ClusterState implements ToXContent, Diffable { public String prettyPrint() { StringBuilder sb = new StringBuilder(); + sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index fd06175e28c..f01a09d4608 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -177,7 +177,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple deleteIndices(event); // also deletes shards of deleted indices - removeUnallocatedIndices(state); // also removes shards of removed indices + removeUnallocatedIndices(event); // also removes shards of removed indices failMissingShards(state); @@ -286,28 +286,16 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple }); } } - - // delete local indices that do neither exist in previous cluster state nor part of tombstones - for (AllocatedIndex indexService : indicesService) { - Index index = indexService.index(); 
- IndexMetaData indexMetaData = event.state().metaData().index(index); - if (indexMetaData == null) { - assert false : "index" + index + " exists locally, doesn't have a metadata but is not part" - + " of the delete index list. \nprevious state: " + event.previousState().prettyPrint() - + "\n current state:\n" + event.state().prettyPrint(); - logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", index); - indicesService.deleteIndex(index, "isn't part of metadata (explicit check)"); - } - } } /** * Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough * shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}). * - * @param state new cluster state + * @param event the cluster changed event */ - private void removeUnallocatedIndices(final ClusterState state) { + private void removeUnallocatedIndices(final ClusterChangedEvent event) { + final ClusterState state = event.state(); final String localNodeId = state.nodes().getLocalNodeId(); assert localNodeId != null; @@ -322,6 +310,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple for (AllocatedIndex indexService : indicesService) { Index index = indexService.index(); if (indicesWithShards.contains(index) == false) { + // if the cluster change indicates a brand new cluster, we only want + // to remove the in-memory structures for the index and not delete the + // contents on disk because the index will later be re-imported as a + // dangling index + assert state.metaData().index(index) != null || event.isNewCluster() : + "index " + index + " does not exist in the cluster state, it should either " + + "have been deleted or the cluster must be new"; logger.debug("{} removing index, no shards allocated", index); indicesService.removeIndex(index, "removing index (no shards allocated)"); } diff --git 
a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index c2ccb9cd4ab..4477974d118 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -32,20 +32,26 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.FailedShard; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.Index; import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -56,9 +62,11 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.Supplier; import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -69,7 +77,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice public void testRandomClusterStateUpdates() { // we have an IndicesClusterStateService per node in the cluster final Map clusterStateServiceMap = new HashMap<>(); - ClusterState state = randomInitialClusterState(clusterStateServiceMap); + ClusterState state = randomInitialClusterState(clusterStateServiceMap, MockIndicesService::new); // each of the following iterations represents a new cluster state update processed on all nodes for (int i = 0; i < 30; i++) { @@ -78,7 +86,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice // calculate new cluster state for (int j = 0; j < randomInt(3); j++) { // multiple iterations to simulate batching of cluster states - state = randomlyUpdateClusterState(state, clusterStateServiceMap); + state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); } // apply cluster state to nodes (incl. master) @@ -97,7 +105,65 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice logger.info("Final cluster state: {}", state.prettyPrint()); } - public ClusterState randomInitialClusterState(Map clusterStateServiceMap) { + /** + * This test ensures that when a node joins a brand new cluster (different cluster UUID), + * different from the cluster it was previously a part of, the in-memory index data structures + * are all removed but the on disk contents of those indices remain so that they can later be + * imported as dangling indices. 
Normally, the first cluster state update that the node + * receives from the new cluster would contain a cluster block that would cause all in-memory + * structures to be removed (see {@link IndicesClusterStateService#clusterChanged(ClusterChangedEvent)}), + * but in the case where the node joined and was a few cluster state updates behind, it would + * not have received the cluster block, in which case we still need to remove the in-memory + * structures while ensuring the data remains on disk. This test executes this particular + * scenario. + */ + public void testJoiningNewClusterOnlyRemovesInMemoryIndexStructures() { + // a cluster state derived from the initial state that includes a created index + String name = "index_" + randomAsciiOfLength(8).toLowerCase(Locale.ROOT); + ShardRoutingState[] replicaStates = new ShardRoutingState[randomIntBetween(0, 3)]; + Arrays.fill(replicaStates, ShardRoutingState.INITIALIZING); + ClusterState stateWithIndex = ClusterStateCreationUtils.state(name, randomBoolean(), ShardRoutingState.INITIALIZING, replicaStates); + + // the initial state which is derived from the newly created cluster state but doesn't contain the index + ClusterState initialState = ClusterState.builder(stateWithIndex) + .metaData(MetaData.builder(stateWithIndex.metaData()).remove(name)) + .routingTable(RoutingTable.builder().build()) + .build(); + + // pick a data node to simulate the adding an index cluster state change event on, that has shards assigned to it + DiscoveryNode node = stateWithIndex.nodes().get( + randomFrom(stateWithIndex.routingTable().index(name).shardsWithState(INITIALIZING)).currentNodeId()); + + // simulate the cluster state change on the node + ClusterState localState = adaptClusterStateToLocalNode(stateWithIndex, node); + ClusterState previousLocalState = adaptClusterStateToLocalNode(initialState, node); + IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(RecordingIndicesService::new); + indicesCSSvc.start(); 
+ indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState)); + + // create a new empty cluster state with a brand new cluster UUID + ClusterState newClusterState = ClusterState.builder(initialState) + .metaData(MetaData.builder(initialState.metaData()).clusterUUID(UUIDs.randomBase64UUID())) + .build(); + + // simulate the cluster state change on the node + localState = adaptClusterStateToLocalNode(newClusterState, node); + previousLocalState = adaptClusterStateToLocalNode(stateWithIndex, node); + indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change with a new cluster UUID (and doesn't contain the index)", + localState, previousLocalState)); + + // check that in memory data structures have been removed once the new cluster state is applied, + // but the persistent data is still there + RecordingIndicesService indicesService = (RecordingIndicesService) indicesCSSvc.indicesService; + for (IndexMetaData indexMetaData : stateWithIndex.metaData()) { + Index index = indexMetaData.getIndex(); + assertNull(indicesService.indexService(index)); + assertFalse(indicesService.isDeleted(index)); + } + } + + public ClusterState randomInitialClusterState(Map clusterStateServiceMap, + Supplier indicesServiceSupplier) { List allNodes = new ArrayList<>(); DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master allNodes.add(localNode); @@ -109,14 +175,15 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); // add nodes to clusterStateServiceMap - updateNodes(state, clusterStateServiceMap); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); return state; } - private void updateNodes(ClusterState state, Map clusterStateServiceMap) { + private void updateNodes(ClusterState state, Map 
clusterStateServiceMap, + Supplier indicesServiceSupplier) { for (DiscoveryNode node : state.nodes()) { clusterStateServiceMap.computeIfAbsent(node, discoveryNode -> { - IndicesClusterStateService ics = createIndicesClusterStateService(); + IndicesClusterStateService ics = createIndicesClusterStateService(indicesServiceSupplier); ics.start(); return ics; }); @@ -131,7 +198,8 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } public ClusterState randomlyUpdateClusterState(ClusterState state, - Map clusterStateServiceMap) { + Map clusterStateServiceMap, + Supplier indicesServiceSupplier) { // randomly create new indices (until we have 200 max) for (int i = 0; i < randomInt(5); i++) { if (state.metaData().indices().size() > 200) { @@ -229,7 +297,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).put(createNode()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node leave - updateNodes(state, clusterStateServiceMap); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } } else { // remove node @@ -239,7 +307,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node join - updateNodes(state, clusterStateServiceMap); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } } } @@ -263,11 +331,11 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); } - private 
IndicesClusterStateService createIndicesClusterStateService() { + private IndicesClusterStateService createIndicesClusterStateService(final Supplier indicesServiceSupplier) { final ThreadPool threadPool = mock(ThreadPool.class); final Executor executor = mock(Executor.class); when(threadPool.generic()).thenReturn(executor); - final MockIndicesService indicesService = new MockIndicesService(); + final MockIndicesService indicesService = indicesServiceSupplier.get(); final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService, @@ -279,4 +347,20 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice threadPool, recoveryTargetService, shardStateAction, null, repositoriesService, null, null, null, null, null); } + private class RecordingIndicesService extends MockIndicesService { + private Set deletedIndices = Collections.emptySet(); + + @Override + public synchronized void deleteIndex(Index index, String reason) { + super.deleteIndex(index, reason); + Set newSet = Sets.newHashSet(deletedIndices); + newSet.add(index); + deletedIndices = Collections.unmodifiableSet(newSet); + } + + public synchronized boolean isDeleted(Index index) { + return deletedIndices.contains(index); + } + } + } From b0730bb2141173366ae7a73525de93c9d122e55c Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Aug 2016 14:15:28 +0100 Subject: [PATCH 029/103] Fix PreBuiltTransportClientTests to run and pass This change does three things: 1. Makes PreBuiltTransportClientTests run since it was silently failing on a missing dependency 2. Makes PreBuiltTransportClientTests pass 3. Removes the http.type and transport.type from being set in the transport clients additional settings since these are set to `netty4` by default anyway. 
--- client/transport/build.gradle | 1 + .../transport/client/PreBuiltTransportClient.java | 6 +----- .../transport/client/PreBuiltTransportClientTests.java | 8 +++++--- .../org/elasticsearch/common/network/NetworkModule.java | 8 +++++--- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/client/transport/build.gradle b/client/transport/build.gradle index 140ef1b4303..dcb932e0ed6 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -34,6 +34,7 @@ dependencies { compile "org.elasticsearch.plugin:percolator-client:${version}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"` } dependencyLicenses { diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index 91e3e0830a1..287dd014c20 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport.client; import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.ReindexPlugin; @@ -79,10 +78,7 @@ public class PreBuiltTransportClient extends TransportClient { @Override public Settings additionalSettings() { - return Settings.builder() - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put("netty.assert.buglevel", true) + return Settings.builder().put("netty.assert.buglevel", true) 
.build(); } diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index 5b72006f5f0..c519b29f9bc 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -27,11 +27,12 @@ import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.percolator.PercolatorPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.mustache.MustachePlugin; -import org.elasticsearch.transport.Netty3Plugin; +import org.elasticsearch.transport.Netty4Plugin; import org.junit.Test; import java.util.Arrays; +import static org.junit.Assert.*; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -41,7 +42,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest { public void testPluginInstalled() { try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) { Settings settings = client.settings(); - assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); + assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); + assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings)); } } @@ -54,7 +56,7 @@ public class PreBuiltTransportClientTests extends RandomizedTest { new PreBuiltTransportClient(Settings.EMPTY, plugin); fail("exception expected"); } catch (IllegalArgumentException ex) { - assertEquals("plugin is already installed", ex.getMessage()); + assertTrue(ex.getMessage().startsWith("plugin already exists: ")); } } } diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java 
b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 824456fe514..f6c2ef326f2 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.common.ParseField; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.util.Providers; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; @@ -57,9 +56,12 @@ public class NetworkModule extends AbstractModule { public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; public static final String HTTP_TYPE_KEY = "http.type"; public static final String LOCAL_TRANSPORT = "local"; + public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default"; + public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default"; - public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString("transport.type.default", Property.NodeScope); - public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString("http.type.default", Property.NodeScope); + public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY, + Property.NodeScope); + public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope); public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = From 
c08557d033b9ce67613c99abaa9e5082119a6160 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 4 Aug 2016 09:48:38 -0400 Subject: [PATCH 030/103] Wait for Netty 4 threads to terminate on close Today if the PreBuiltTransportClient is using Netty 4 transport, on shutdown some Netty 4 threads could linger. This commit causes the client to wait for these threads to shutdown upon termination. --- client/transport/build.gradle | 2 +- .../client/PreBuiltTransportClient.java | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/client/transport/build.gradle b/client/transport/build.gradle index dcb932e0ed6..c3dc2d84982 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -34,7 +34,7 @@ dependencies { compile "org.elasticsearch.plugin:percolator-client:${version}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" - testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"` + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" } dependencyLicenses { diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index 287dd014c20..8a28ab2df94 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -19,7 +19,10 @@ package org.elasticsearch.transport.client; +import io.netty.util.ThreadDeathWatcher; +import io.netty.util.concurrent.GlobalEventExecutor; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.ReindexPlugin; 
@@ -33,6 +36,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; /** * A builder to create an instance of {@link TransportClient} @@ -84,4 +88,18 @@ public class PreBuiltTransportClient extends TransportClient { } + @Override + public void close() { + super.close(); + if (!NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) + || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + } From 9f1525255ac8629d83364ca3b65c4cf86304dd9d Mon Sep 17 00:00:00 2001 From: Ryan Biesemeyer Date: Thu, 4 Aug 2016 13:56:28 +0000 Subject: [PATCH 031/103] Update link to mapper-murmur3 plugin in card docs (#19788) --- .../aggregations/metrics/cardinality-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc index b8ee0508618..73d7f3c26bb 100644 --- a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc @@ -141,7 +141,7 @@ On string fields that have a high cardinality, it might be faster to store the hash of your field values in your index and then run the cardinality aggregation on this field. This can either be done by providing hash values from client-side or by letting elasticsearch compute hash values for you by using the -{plugins}/mapper-size.html[`mapper-murmur3`] plugin. +{plugins}/mapper-murmur3.html[`mapper-murmur3`] plugin. NOTE: Pre-computing hashes is usually only useful on very large and/or high-cardinality fields as it saves CPU and memory. 
However, on numeric From 2936810c631252e258d3e0e576d144b532cf9d9d Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 4 Aug 2016 10:03:08 -0400 Subject: [PATCH 032/103] Setting exists equals false instead of not exists This commit rewrites a boolean expression to check for equality with false instead of negating the existence check. --- .../elasticsearch/transport/client/PreBuiltTransportClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index 8a28ab2df94..acc28f2e34a 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -91,7 +91,7 @@ public class PreBuiltTransportClient extends TransportClient { @Override public void close() { super.close(); - if (!NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) + if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) { try { GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); From 34bb1508637368c43b792992646a612bb8022e99 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Thu, 4 Aug 2016 10:16:58 -0400 Subject: [PATCH 033/103] [TEST] Fixes primary term in TransportReplicationActionTests#testReplicaProxy --- .../support/replication/TransportReplicationActionTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 9d8bf87757e..6b1d0dd0885 100644 --- 
a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -534,7 +534,7 @@ public class TransportReplicationActionTests extends ESTestCase { AtomicReference failure = new AtomicReference<>(); AtomicReference ignoredFailure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); - proxy.failShard(replica, randomIntBetween(0, 10), "test", new ElasticsearchException("simulated"), + proxy.failShard(replica, randomIntBetween(1, 10), "test", new ElasticsearchException("simulated"), () -> success.set(true), failure::set, ignoredFailure::set ); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); From 2cceb0a5f46a77b94d10ce68e08cde5f15469148 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 4 Aug 2016 17:17:53 +0200 Subject: [PATCH 034/103] Updated v5.0.0-alpha5 release notes --- .../release-notes/5.0.0-alpha5.asciidoc | 46 ++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/docs/reference/release-notes/5.0.0-alpha5.asciidoc b/docs/reference/release-notes/5.0.0-alpha5.asciidoc index 3ba956f9f38..0137af17423 100644 --- a/docs/reference/release-notes/5.0.0-alpha5.asciidoc +++ b/docs/reference/release-notes/5.0.0-alpha5.asciidoc @@ -9,6 +9,9 @@ IMPORTANT: This is an alpha release and is intended for _testing purposes only_. 
[float] === Breaking changes +CAT API:: +* Improve cat thread pool API {pull}19721[#19721] (issue: {issue}19590[#19590]) + Cluster:: * Persistent Node Ids {pull}19140[#19140] (issue: {issue}17811[#17811]) @@ -18,6 +21,9 @@ Core:: Exceptions:: * Die with dignity {pull}19272[#19272] (issue: {issue}19231[#19231]) +Index APIs:: +* Removes write consistency level across replication action APIs in favor of wait_for_active_shards {pull}19454[#19454] (issue: {issue}18985[#18985]) + Scripting:: * Remove deprecated 1.x script and template syntax {pull}19387[#19387] (issue: {issue}13729[#13729]) @@ -30,10 +36,13 @@ Settings:: -[[breaking java-5.0.0-alpha5]] +[[breaking-java-5.0.0-alpha5]] [float] === Breaking Java changes +CRUD:: +* Removing isCreated and isFound from the Java API {pull}19645[#19645] (issues: {issue}19566[#19566], {issue}19631[#19631]) + Internal:: * Clean up BytesReference {pull}19196[#19196] @@ -61,6 +70,9 @@ Scripting:: Settings:: * Remove `node.mode` and `node.local` settings {pull}19428[#19428] +Snapshot/Restore:: +* Removes extra writeBlob method in BlobContainer {pull}19727[#19727] (issue: {issue}18528[#18528]) + [[deprecation-5.0.0-alpha5]] @@ -82,6 +94,9 @@ Templates:: [float] === New features +Aggregations:: +* Split regular histograms from date histograms. {pull}19551[#19551] (issues: {issue}4847[#4847], {issue}8082[#8082]) + Circuit Breakers:: * Circuit break on aggregation bucket numbers with request breaker {pull}19394[#19394] (issue: {issue}14046[#14046]) @@ -110,6 +125,7 @@ Translog:: === Enhancements Aggregations:: +* Make the heuristic to compute the default shard size less aggressive. 
{pull}19659[#19659] * Add _bucket_count option to buckets_path {pull}19571[#19571] (issue: {issue}19553[#19553]) * Remove AggregationStreams {pull}19507[#19507] * Migrate serial_diff aggregation to NamedWriteable {pull}19483[#19483] @@ -139,6 +155,7 @@ CAT API:: * Includes the index UUID in the _cat/indices API {pull}19204[#19204] (issue: {issue}19132[#19132]) CRUD:: +* #19664 Renaming operation to result and reworking responses {pull}19704[#19704] (issue: {issue}19664[#19664]) * Adding _operation field to index, update, delete response. {pull}19566[#19566] (issues: {issue}19267[#19267], {issue}9642[#9642], {issue}9736[#9736]) Cache:: @@ -153,6 +170,9 @@ Core:: * Makes index creation more friendly {pull}19450[#19450] (issue: {issue}9126[#9126]) * Clearer error when handling fractional time values {pull}19158[#19158] (issue: {issue}19102[#19102]) +Discovery:: +* Do not log cluster service errors at after joining a master {pull}19705[#19705] + Exceptions:: * Make NotMasterException a first class citizen {pull}19385[#19385] * Do not catch throwable {pull}19231[#19231] @@ -168,6 +188,7 @@ Ingest:: Internal:: * Make Priority an enum {pull}19448[#19448] +* Snapshot UUIDs in blob names {pull}19421[#19421] (issues: {issue}18156[#18156], {issue}18815[#18815], {issue}19002[#19002], {issue}7540[#7540]) * Add RestController method for deprecating in one step {pull}19343[#19343] * Tighten ensure atomic move cleanup {pull}19309[#19309] (issue: {issue}19036[#19036]) * Enable checkstyle ModifierOrder {pull}19214[#19214] @@ -197,6 +218,8 @@ Mapping:: * Elasticsearch should reject dynamic templates with unknown `match_mapping_type`. 
{pull}17285[#17285] (issue: {issue}16945[#16945]) Network:: +* Explicitly tell Netty to not use unsafe {pull}19786[#19786] (issues: {issue}19562[#19562], {issue}5624[#5624]) +* Enable Netty 4 extensions {pull}19767[#19767] (issue: {issue}19526[#19526]) * Modularize netty {pull}19392[#19392] * Simplify TcpTransport interface by reducing send code to a single send method {pull}19223[#19223] @@ -219,6 +242,7 @@ Plugin Mapper Size:: * Add doc values support to the _size field in the mapper-size plugin {pull}19217[#19217] (issue: {issue}18334[#18334]) Plugins:: +* Add ScriptService to dependencies available for plugin components {pull}19770[#19770] * Log one plugin info per line {pull}19441[#19441] * Make rest headers registration pull based {pull}19440[#19440] * Add resource watcher to services available for plugin components {pull}19401[#19401] @@ -238,6 +262,8 @@ Recovery:: * Non-blocking primary relocation hand-off {pull}19013[#19013] (issues: {issue}15900[#15900], {issue}18553[#18553]) Reindex API:: +* Only ask for `_version` we need it {pull}19693[#19693] (issue: {issue}19135[#19135]) +* Use fewer threads when reindexing-from-remote {pull}19636[#19636] * Support authentication with reindex-from-remote {pull}19310[#19310] * Support requests_per_second=-1 to mean no throttling in reindex {pull}19101[#19101] (issue: {issue}19089[#19089]) @@ -253,7 +279,10 @@ Settings:: * Validates new dynamic settings from the current state {pull}19122[#19122] (issue: {issue}19046[#19046]) Snapshot/Restore:: +* BlobContainer#writeBlob no longer can overwrite a blob {pull}19749[#19749] (issue: {issue}15579[#15579]) +* More resilient blob handling in snapshot repositories {pull}19706[#19706] (issues: {issue}18156[#18156], {issue}18815[#18815], {issue}19421[#19421], {issue}7540[#7540]) * Adding repository index generational files {pull}19002[#19002] (issue: {issue}18156[#18156]) +* Raised IOException on deleteBlob {pull}18815[#18815] (issue: {issue}18530[#18530]) Stats:: * Add missing 
field type in the FieldStats response. {pull}19241[#19241] (issue: {issue}17750[#17750]) @@ -266,6 +295,7 @@ Stats:: === Bug fixes Aggregations:: +* Undeprecates `aggs` in the search request {pull}19674[#19674] (issue: {issue}19504[#19504]) * Change how `nested` and `reverse_nested` aggs know about their nested depth level {pull}19550[#19550] (issues: {issue}11749[#11749], {issue}12410[#12410]) * Make ExtendedBounds immutable {pull}19490[#19490] (issue: {issue}19481[#19481]) * Fix potential AssertionError with include/exclude on terms aggregations. {pull}19252[#19252] (issue: {issue}18575[#18575]) @@ -279,23 +309,31 @@ Allocation:: Analysis:: * Fix analyzer alias processing {pull}19506[#19506] (issue: {issue}19163[#19163]) +CAT API:: +* Fixes cat tasks operation in detailed mode {pull}19759[#19759] (issue: {issue}19755[#19755]) +* Add index pattern wildcards support to _cat/shards {pull}19655[#19655] (issue: {issue}19634[#19634]) + Cluster:: +* Allow routing table to be filtered by index pattern {pull}19688[#19688] * Use executor's describeTasks method to log task information in cluster service {pull}19531[#19531] Core:: * Makes `m` case sensitive in TimeValue {pull}19649[#19649] (issue: {issue}19619[#19619]) +* Guard against negative result from FileStore.getUsableSpace when picking data path for a new shard {pull}19554[#19554] * Handle rejected execution exception on reschedule {pull}19505[#19505] Dates:: * Make sure TimeIntervalRounding is monotonic for increasing dates {pull}19020[#19020] Geo:: -* Incomplete results when using geo_distance for large distances [ISSUE] {pull}17578[#17578] +* Incomplete results when using geo_distance for large distances {pull}17578[#17578] Highlighting:: +* Plain highlighter should ignore parent/child queries {pull}19616[#19616] (issue: {issue}14999[#14999]) * Let fast vector highlighter also extract terms from the nested query's inner query. 
{pull}19337[#19337] (issue: {issue}19265[#19265]) Index APIs:: +* Fixes active shard count check in the case of `all` shards {pull}19760[#19760] * Add zero-padding to auto-generated rollover index name increment {pull}19610[#19610] (issue: {issue}19484[#19484]) Ingest:: @@ -320,6 +358,7 @@ Logging:: * Only log running out of slots when out of slots {pull}19637[#19637] Mapping:: +* Mappings: Fix detection of metadata fields in documents {pull}19765[#19765] * Fix not_analyzed string fields to error when position_increment_gap is set {pull}19510[#19510] * Automatically created indices should honor `index.mapper.dynamic`. {pull}19478[#19478] (issue: {issue}17592[#17592]) @@ -349,6 +388,7 @@ Plugin Repository S3:: * Fix repository S3 Settings and add more tests {pull}18703[#18703] (issues: {issue}18662[#18662], {issue}18690[#18690]) Query DSL:: +* Throw ParsingException if a query is wrapped in an array {pull}19750[#19750] (issue: {issue}12887[#12887]) * Restore parameter name auto_generate_phrase_queries {pull}19514[#19514] (issue: {issue}19512[#19512]) REST:: @@ -371,6 +411,7 @@ Stats:: * Allocation explain: Also serialize `includeDiskInfo` field {pull}19492[#19492] Store:: +* Tighten up concurrent store metadata listing and engine writes {pull}19684[#19684] (issue: {issue}19416[#19416]) * Make static Store access shard lock aware {pull}19416[#19416] (issue: {issue}18938[#18938]) * Catch assertion errors on commit and turn it into a real exception {pull}19357[#19357] (issue: {issue}19356[#19356]) @@ -381,6 +422,7 @@ Store:: === Upgrades Network:: +* Dependencies: Upgrade to netty 4.1.4 {pull}19689[#19689] * Introduce Netty 4 {pull}19526[#19526] (issue: {issue}3226[#3226]) * Upgrade to netty 3.10.6.Final {pull}19235[#19235] From 785624e96b03c5fc011bfd89429036d758b404b7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 4 Aug 2016 11:22:13 -0400 Subject: [PATCH 035/103] Restore interrupted status on when closing client When closing a transport client that depends on 
Netty 4, interrupted exceptions can be thrown while shutting down some Netty threads. This commit refactors the handling of these exceptions to finish shutting down and then just restore the interrupted status. --- .../transport/client/PreBuiltTransportClient.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index acc28f2e34a..cc7e722d802 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport.client; import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; @@ -95,9 +96,13 @@ public class PreBuiltTransportClient extends TransportClient { || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) { try { GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); } catch (InterruptedException e) { - throw new RuntimeException(e); + Thread.currentThread().interrupt(); } } } From 5ab5cc69b8165e5349cc6384842c217223f5e478 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 3 Aug 2016 14:51:50 +0100 Subject: [PATCH 036/103] Remove unused rounding code Factor rounding and Interval rounding (the non-date based rounding) was no longer used so it has been removed. 
Offset rounding has been retained for now since both date based rounding classes rely on it --- .../common/rounding/Rounding.java | 150 ------------------ .../common/rounding/TimeZoneRounding.java | 23 ++- .../bucket/histogram/DateHistogramParser.java | 17 +- ...ingTests.java => OffsetRoundingTests.java} | 33 +--- 4 files changed, 22 insertions(+), 201 deletions(-) rename core/src/test/java/org/elasticsearch/common/rounding/{RoundingTests.java => OffsetRoundingTests.java} (70%) diff --git a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java index 5633cf9f213..4ddba09cfab 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -54,157 +53,10 @@ public abstract class Rounding implements Streamable { @Override public abstract int hashCode(); - /** - * Rounding strategy which is based on an interval - * - * {@code rounded = value - (value % interval) } - */ - public static class Interval extends Rounding { - - static final byte ID = 0; - - public static final ParseField INTERVAL_FIELD = new ParseField("interval"); - - private long interval; - - public Interval() { // for serialization - } - - /** - * Creates a new interval rounding. 
- * - * @param interval The interval - */ - public Interval(long interval) { - this.interval = interval; - } - - @Override - public byte id() { - return ID; - } - - public static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - public static long roundValue(long key, long interval) { - return key * interval; - } - - @Override - public long round(long value) { - return roundKey(value, interval) * interval; - } - - @Override - public long nextRoundingValue(long value) { - assert value == round(value); - return value + interval; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - interval = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - } - - @Override - public int hashCode() { - return Objects.hash(interval); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Interval other = (Interval) obj; - return Objects.equals(interval, other.interval); - } - } - - public static class FactorRounding extends Rounding { - - static final byte ID = 7; - - public static final ParseField FACTOR_FIELD = new ParseField("factor"); - - private Rounding rounding; - - private float factor; - - FactorRounding() { // for serialization - } - - FactorRounding(Rounding rounding, float factor) { - this.rounding = rounding; - this.factor = factor; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - return rounding.round((long) (factor * utcMillis)); - } - - @Override - public long nextRoundingValue(long value) { - return rounding.nextRoundingValue(value); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); - factor = in.readFloat(); - } - - @Override - public 
void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); - out.writeFloat(factor); - } - - @Override - public int hashCode() { - return Objects.hash(rounding, factor); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - FactorRounding other = (FactorRounding) obj; - return Objects.equals(rounding, other.rounding) - && Objects.equals(factor, other.factor); - } - } - public static class OffsetRounding extends Rounding { static final byte ID = 8; - public static final ParseField OFFSET_FIELD = new ParseField("offset"); - private Rounding rounding; private long offset; @@ -274,10 +126,8 @@ public abstract class Rounding implements Streamable { Rounding rounding = null; byte id = in.readByte(); switch (id) { - case Interval.ID: rounding = new Interval(); break; case TimeZoneRounding.TimeUnitRounding.ID: rounding = new TimeZoneRounding.TimeUnitRounding(); break; case TimeZoneRounding.TimeIntervalRounding.ID: rounding = new TimeZoneRounding.TimeIntervalRounding(); break; - case TimeZoneRounding.FactorRounding.ID: rounding = new FactorRounding(); break; case OffsetRounding.ID: rounding = new OffsetRounding(); break; default: throw new ElasticsearchException("unknown rounding id [" + id + "]"); } diff --git a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 932afa15b56..5287203df69 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.rounding; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -36,8 +35,6 @@ 
import java.util.Objects; * daylight saving times. */ public abstract class TimeZoneRounding extends Rounding { - public static final ParseField INTERVAL_FIELD = new ParseField("interval"); - public static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone"); public static Builder builder(DateTimeUnit unit) { return new Builder(unit); @@ -54,8 +51,6 @@ public abstract class TimeZoneRounding extends Rounding { private DateTimeZone timeZone = DateTimeZone.UTC; - private float factor = 1.0f; - private long offset; public Builder(DateTimeUnit unit) { @@ -83,11 +78,6 @@ public abstract class TimeZoneRounding extends Rounding { return this; } - public Builder factor(float factor) { - this.factor = factor; - return this; - } - public Rounding build() { Rounding timeZoneRounding; if (unit != null) { @@ -98,9 +88,6 @@ public abstract class TimeZoneRounding extends Rounding { if (offset != 0) { timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); } - if (factor != 1.0f) { - timeZoneRounding = new FactorRounding(timeZoneRounding, factor); - } return timeZoneRounding; } } @@ -215,7 +202,7 @@ public abstract class TimeZoneRounding extends Rounding { @Override public long round(long utcMillis) { long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval); + long rounded = roundKey(timeLocal, interval) * interval; long roundedUTC; if (isInDSTGap(rounded) == false) { roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); @@ -238,6 +225,14 @@ public abstract class TimeZoneRounding extends Rounding { return roundedUTC; } + private static long roundKey(long value, long interval) { + if (value < 0) { + return (value - interval + 1) / interval; + } else { + return value / interval; + } + } + /** * Determine whether the local instant is a valid instant in the given * time zone. 
The logic for this is taken from diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java index f139ad18bb0..e3a3ea75762 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser; @@ -45,7 +44,7 @@ public class DateHistogramParser extends NumericValuesSourceParser { protected DateHistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { DateHistogramAggregationBuilder factory = new DateHistogramAggregationBuilder(aggregationName); - Object interval = otherOptions.get(Rounding.Interval.INTERVAL_FIELD); + Object interval = otherOptions.get(Histogram.INTERVAL_FIELD); if (interval == null) { throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]"); } else if (interval instanceof Long) { @@ -55,7 +54,7 @@ public class DateHistogramParser extends NumericValuesSourceParser { } else { throw new IllegalStateException("Unexpected interval class: " + interval.getClass()); } - Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD); + Long offset = (Long) otherOptions.get(Histogram.OFFSET_FIELD); if 
(offset != null) { factory.offset(offset); } @@ -83,12 +82,12 @@ public class DateHistogramParser extends NumericValuesSourceParser { protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException { if (token.isValue()) { - if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) { + if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) { if (token == XContentParser.Token.VALUE_STRING) { - otherOptions.put(Rounding.Interval.INTERVAL_FIELD, new DateHistogramInterval(parser.text())); + otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text())); return true; } else { - otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue()); + otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue()); return true; } } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) { @@ -97,13 +96,13 @@ public class DateHistogramParser extends NumericValuesSourceParser { } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) { otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue()); return true; - } else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) { + } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) { if (token == XContentParser.Token.VALUE_STRING) { - otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, + otherOptions.put(Histogram.OFFSET_FIELD, DateHistogramAggregationBuilder.parseStringOffset(parser.text())); return true; } else { - otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue()); + otherOptions.put(Histogram.OFFSET_FIELD, parser.longValue()); return true; } } else { diff --git a/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java 
similarity index 70% rename from core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java rename to core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java index a71cc77ffc1..a601bd140e8 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java @@ -20,37 +20,13 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTimeZone; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class RoundingTests extends ESTestCase { - /** - * simple test case to illustrate how Rounding.Interval works on readable input - */ - public void testInterval() { - int interval = 10; - Rounding.Interval rounding = new Rounding.Interval(interval); - int value = 24; - final long r = rounding.round(24); - String message = "round(" + value + ", interval=" + interval + ") = " + r; - assertEquals(value/interval * interval, r); - assertEquals(message, 0, r % interval); - } - - public void testIntervalRandom() { - final long interval = randomIntBetween(1, 100); - Rounding.Interval rounding = new Rounding.Interval(interval); - for (int i = 0; i < 1000; ++i) { - long l = Math.max(randomLong(), Long.MIN_VALUE + interval); - final long r = rounding.round(l); - String message = "round(" + l + ", interval=" + interval + ") = " + r; - assertEquals(message, 0, r % interval); - assertThat(message, r, lessThanOrEqualTo(l)); - assertThat(message, r + interval, greaterThan(l)); - } - } +public class OffsetRoundingTests extends ESTestCase { /** * Simple test case to illustrate how Rounding.Offset works on readable input. 
@@ -60,7 +36,8 @@ public class RoundingTests extends ESTestCase { public void testOffsetRounding() { final long interval = 10; final long offset = 7; - Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(new Rounding.Interval(interval), offset); + Rounding.OffsetRounding rounding = new Rounding.OffsetRounding( + new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); assertEquals(-3, rounding.round(6)); assertEquals(7, rounding.nextRoundingValue(-3)); assertEquals(7, rounding.round(7)); @@ -76,7 +53,7 @@ public class RoundingTests extends ESTestCase { public void testOffsetRoundingRandom() { for (int i = 0; i < 1000; ++i) { final long interval = randomIntBetween(1, 100); - Rounding.Interval internalRounding = new Rounding.Interval(interval); + Rounding internalRounding = new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC); final long offset = randomIntBetween(-100, 100); Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset); long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow From c14155e4a850d1e3e39d22d68fa33ad37efbd25b Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 3 Aug 2016 16:05:52 +0100 Subject: [PATCH 037/103] Remove TimeZoneRounding abstraction Because the Rounding class now only deals with date based rounding of values we can remove the TimeZoneRounding abstraction to simplify the code. 
--- .../common/rounding/Rounding.java | 285 +++++++++++++++- .../common/rounding/TimeZoneRounding.java | 309 ------------------ .../histogram/DateHistogramAggregator.java | 6 +- .../DateHistogramAggregatorFactory.java | 9 +- .../common/rounding/OffsetRoundingTests.java | 4 +- .../rounding/TimeZoneRoundingTests.java | 109 +++--- 6 files changed, 346 insertions(+), 376 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java diff --git a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java index 4ddba09cfab..59acf468b86 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java @@ -22,6 +22,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.TimeValue; +import org.joda.time.DateTimeField; +import org.joda.time.DateTimeZone; +import org.joda.time.IllegalInstantException; import java.io.IOException; import java.util.Objects; @@ -53,6 +57,279 @@ public abstract class Rounding implements Streamable { @Override public abstract int hashCode(); + public static Builder builder(DateTimeUnit unit) { + return new Builder(unit); + } + + public static Builder builder(TimeValue interval) { + return new Builder(interval); + } + + public static class Builder { + + private final DateTimeUnit unit; + private final long interval; + + private DateTimeZone timeZone = DateTimeZone.UTC; + + private long offset; + + public Builder(DateTimeUnit unit) { + this.unit = unit; + this.interval = -1; + } + + public Builder(TimeValue interval) { + this.unit = null; + if (interval.millis() < 1) + throw new IllegalArgumentException("Zero or negative time interval not 
supported"); + this.interval = interval.millis(); + } + + public Builder timeZone(DateTimeZone timeZone) { + if (timeZone == null) { + throw new IllegalArgumentException("Setting null as timezone is not supported"); + } + this.timeZone = timeZone; + return this; + } + + public Builder offset(long offset) { + this.offset = offset; + return this; + } + + public Rounding build() { + Rounding timeZoneRounding; + if (unit != null) { + timeZoneRounding = new TimeUnitRounding(unit, timeZone); + } else { + timeZoneRounding = new TimeIntervalRounding(interval, timeZone); + } + if (offset != 0) { + timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); + } + return timeZoneRounding; + } + } + + static class TimeUnitRounding extends Rounding { + + static final byte ID = 1; + + private DateTimeUnit unit; + private DateTimeField field; + private DateTimeZone timeZone; + + TimeUnitRounding() { // for serialization + } + + TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { + this.unit = unit; + this.field = unit.field(timeZone); + this.timeZone = timeZone; + } + + @Override + public byte id() { + return ID; + } + + @Override + public long round(long utcMillis) { + long rounded = field.roundFloor(utcMillis); + if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) { + // in this case, we crossed a time zone transition. In some edge + // cases this will + // result in a value that is not a rounded value itself. We need + // to round again + // to make sure. 
This will have no affect in cases where + // 'rounded' was already a proper + // rounded value + rounded = field.roundFloor(rounded); + } + assert rounded == field.roundFloor(rounded); + return rounded; + } + + @Override + public long nextRoundingValue(long utcMillis) { + long floor = round(utcMillis); + // add one unit and round to get to next rounded value + long next = round(field.add(floor, 1)); + if (next == floor) { + // in rare case we need to add more than one unit + next = round(field.add(floor, 2)); + } + return next; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + unit = DateTimeUnit.resolve(in.readByte()); + timeZone = DateTimeZone.forID(in.readString()); + field = unit.field(timeZone); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(unit.id()); + out.writeString(timeZone.getID()); + } + + @Override + public int hashCode() { + return Objects.hash(unit, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeUnitRounding other = (TimeUnitRounding) obj; + return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone); + } + + @Override + public String toString() { + return "[" + timeZone + "][" + unit + "]"; + } + } + + static class TimeIntervalRounding extends Rounding { + + static final byte ID = 2; + + private long interval; + private DateTimeZone timeZone; + + TimeIntervalRounding() { // for serialization + } + + TimeIntervalRounding(long interval, DateTimeZone timeZone) { + if (interval < 1) + throw new IllegalArgumentException("Zero or negative time interval not supported"); + this.interval = interval; + this.timeZone = timeZone; + } + + @Override + public byte id() { + return ID; + } + + @Override + public long round(long utcMillis) { + long timeLocal = timeZone.convertUTCToLocal(utcMillis); + long rounded = roundKey(timeLocal, 
interval) * interval; + long roundedUTC; + if (isInDSTGap(rounded) == false) { + roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); + // check if we crossed DST transition, in this case we want the + // last rounded value before the transition + long transition = timeZone.previousTransition(utcMillis); + if (transition != utcMillis && transition > roundedUTC) { + roundedUTC = round(transition - 1); + } + } else { + /* + * Edge case where the rounded local time is illegal and landed + * in a DST gap. In this case, we choose 1ms tick after the + * transition date. We don't want the transition date itself + * because those dates, when rounded themselves, fall into the + * previous interval. This would violate the invariant that the + * rounding operation should be idempotent. + */ + roundedUTC = timeZone.previousTransition(utcMillis) + 1; + } + return roundedUTC; + } + + private static long roundKey(long value, long interval) { + if (value < 0) { + return (value - interval + 1) / interval; + } else { + return value / interval; + } + } + + /** + * Determine whether the local instant is a valid instant in the given + * time zone. The logic for this is taken from + * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the + * `strict` mode case, but instead of throwing an + * {@link IllegalInstantException}, which is costly, we want to return a + * flag indicating that the value is illegal in that time zone. 
+ */ + private boolean isInDSTGap(long instantLocal) { + if (timeZone.isFixed()) { + return false; + } + // get the offset at instantLocal (first estimate) + int offsetLocal = timeZone.getOffset(instantLocal); + // adjust instantLocal using the estimate and recalc the offset + int offset = timeZone.getOffset(instantLocal - offsetLocal); + // if the offsets differ, we must be near a DST boundary + if (offsetLocal != offset) { + // determine if we are in the DST gap + long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); + if (nextLocal == (instantLocal - offsetLocal)) { + nextLocal = Long.MAX_VALUE; + } + long nextAdjusted = timeZone.nextTransition(instantLocal - offset); + if (nextAdjusted == (instantLocal - offset)) { + nextAdjusted = Long.MAX_VALUE; + } + if (nextLocal != nextAdjusted) { + // we are in the DST gap + return true; + } + } + return false; + } + + @Override + public long nextRoundingValue(long time) { + long timeLocal = time; + timeLocal = timeZone.convertUTCToLocal(time); + long next = timeLocal + interval; + return timeZone.convertLocalToUTC(next, false); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + interval = in.readVLong(); + timeZone = DateTimeZone.forID(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(interval); + out.writeString(timeZone.getID()); + } + + @Override + public int hashCode() { + return Objects.hash(interval, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeIntervalRounding other = (TimeIntervalRounding) obj; + return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone); + } + } + public static class OffsetRounding extends Rounding { static final byte ID = 8; @@ -95,12 +372,12 @@ public abstract class Rounding implements Streamable { 
Rounding.Streams.write(rounding, out); out.writeLong(offset); } - + @Override public int hashCode() { return Objects.hash(rounding, offset); } - + @Override public boolean equals(Object obj) { if (obj == null) { @@ -126,8 +403,8 @@ public abstract class Rounding implements Streamable { Rounding rounding = null; byte id = in.readByte(); switch (id) { - case TimeZoneRounding.TimeUnitRounding.ID: rounding = new TimeZoneRounding.TimeUnitRounding(); break; - case TimeZoneRounding.TimeIntervalRounding.ID: rounding = new TimeZoneRounding.TimeIntervalRounding(); break; + case TimeUnitRounding.ID: rounding = new TimeUnitRounding(); break; + case TimeIntervalRounding.ID: rounding = new TimeIntervalRounding(); break; case OffsetRounding.ID: rounding = new OffsetRounding(); break; default: throw new ElasticsearchException("unknown rounding id [" + id + "]"); } diff --git a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java deleted file mode 100644 index 5287203df69..00000000000 --- a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.rounding; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.IllegalInstantException; - -import java.io.IOException; -import java.util.Objects; - -/** - * A rounding strategy for dates. It is typically used to group together dates - * that are part of the same hour/day/month, taking into account time zones and - * daylight saving times. - */ -public abstract class TimeZoneRounding extends Rounding { - - public static Builder builder(DateTimeUnit unit) { - return new Builder(unit); - } - - public static Builder builder(TimeValue interval) { - return new Builder(interval); - } - - public static class Builder { - - private final DateTimeUnit unit; - private final long interval; - - private DateTimeZone timeZone = DateTimeZone.UTC; - - private long offset; - - public Builder(DateTimeUnit unit) { - this.unit = unit; - this.interval = -1; - } - - public Builder(TimeValue interval) { - this.unit = null; - if (interval.millis() < 1) - throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval.millis(); - } - - public Builder timeZone(DateTimeZone timeZone) { - if (timeZone == null) { - throw new IllegalArgumentException("Setting null as timezone is not supported"); - } - this.timeZone = timeZone; - return this; - } - - public Builder offset(long offset) { - this.offset = offset; - return this; - } - - public Rounding build() { - Rounding timeZoneRounding; - if (unit != null) { - timeZoneRounding = new TimeUnitRounding(unit, timeZone); - } else { - timeZoneRounding = new TimeIntervalRounding(interval, timeZone); - } - if (offset != 0) { - timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); - } - return timeZoneRounding; - } - } - - static class TimeUnitRounding 
extends TimeZoneRounding { - - static final byte ID = 1; - - private DateTimeUnit unit; - private DateTimeField field; - private DateTimeZone timeZone; - - TimeUnitRounding() { // for serialization - } - - TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { - this.unit = unit; - this.field = unit.field(timeZone); - this.timeZone = timeZone; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long rounded = field.roundFloor(utcMillis); - if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) { - // in this case, we crossed a time zone transition. In some edge cases this will - // result in a value that is not a rounded value itself. We need to round again - // to make sure. This will have no affect in cases where 'rounded' was already a proper - // rounded value - rounded = field.roundFloor(rounded); - } - assert rounded == field.roundFloor(rounded); - return rounded; - } - - @Override - public long nextRoundingValue(long utcMillis) { - long floor = round(utcMillis); - // add one unit and round to get to next rounded value - long next = round(field.add(floor, 1)); - if (next == floor) { - // in rare case we need to add more than one unit - next = round(field.add(floor, 2)); - } - return next; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - unit = DateTimeUnit.resolve(in.readByte()); - timeZone = DateTimeZone.forID(in.readString()); - field = unit.field(timeZone); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(unit.id()); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(unit, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeUnitRounding other = (TimeUnitRounding) obj; - return Objects.equals(unit, 
other.unit) - && Objects.equals(timeZone, other.timeZone); - } - - @Override - public String toString() { - return "[" + timeZone + "][" + unit +"]"; - } - } - - static class TimeIntervalRounding extends TimeZoneRounding { - - static final byte ID = 2; - - private long interval; - private DateTimeZone timeZone; - - TimeIntervalRounding() { // for serialization - } - - TimeIntervalRounding(long interval, DateTimeZone timeZone) { - if (interval < 1) - throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval; - this.timeZone = timeZone; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = roundKey(timeLocal, interval) * interval; - long roundedUTC; - if (isInDSTGap(rounded) == false) { - roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); - // check if we crossed DST transition, in this case we want the last rounded value before the transition - long transition = timeZone.previousTransition(utcMillis); - if (transition != utcMillis && transition > roundedUTC) { - roundedUTC = round(transition - 1); - } - } else { - /* - * Edge case where the rounded local time is illegal and landed - * in a DST gap. In this case, we choose 1ms tick after the - * transition date. We don't want the transition date itself - * because those dates, when rounded themselves, fall into the - * previous interval. This would violate the invariant that the - * rounding operation should be idempotent. - */ - roundedUTC = timeZone.previousTransition(utcMillis) + 1; - } - return roundedUTC; - } - - private static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - /** - * Determine whether the local instant is a valid instant in the given - * time zone. 
The logic for this is taken from - * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the - * `strict` mode case, but instead of throwing an - * {@link IllegalInstantException}, which is costly, we want to return a - * flag indicating that the value is illegal in that time zone. - */ - private boolean isInDSTGap(long instantLocal) { - if (timeZone.isFixed()) { - return false; - } - // get the offset at instantLocal (first estimate) - int offsetLocal = timeZone.getOffset(instantLocal); - // adjust instantLocal using the estimate and recalc the offset - int offset = timeZone.getOffset(instantLocal - offsetLocal); - // if the offsets differ, we must be near a DST boundary - if (offsetLocal != offset) { - // determine if we are in the DST gap - long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); - if (nextLocal == (instantLocal - offsetLocal)) { - nextLocal = Long.MAX_VALUE; - } - long nextAdjusted = timeZone.nextTransition(instantLocal - offset); - if (nextAdjusted == (instantLocal - offset)) { - nextAdjusted = Long.MAX_VALUE; - } - if (nextLocal != nextAdjusted) { - // we are in the DST gap - return true; - } - } - return false; - } - - @Override - public long nextRoundingValue(long time) { - long timeLocal = time; - timeLocal = timeZone.convertUTCToLocal(time); - long next = timeLocal + interval; - return timeZone.convertLocalToUTC(next, false); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - interval = in.readVLong(); - timeZone = DateTimeZone.forID(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(interval, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeIntervalRounding other = (TimeIntervalRounding) obj; - 
return Objects.equals(interval, other.interval) - && Objects.equals(timeZone, other.timeZone); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index cf8325683e2..8785d53e01e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.rounding.TimeZoneRounding; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; @@ -45,8 +44,9 @@ import java.util.Map; /** * An aggregator for date values. Every date is rounded down using a configured - * {@link TimeZoneRounding}. - * @see TimeZoneRounding + * {@link Rounding}. 
+ * + * @see Rounding */ class DateHistogramAggregator extends BucketsAggregator { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 17c6d82a9c3..2743989a4b5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.rounding.TimeZoneRounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -95,19 +94,19 @@ public final class DateHistogramAggregatorFactory } private Rounding createRounding() { - TimeZoneRounding.Builder tzRoundingBuilder; + Rounding.Builder tzRoundingBuilder; if (dateHistogramInterval != null) { DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); if (dateTimeUnit != null) { - tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit); + tzRoundingBuilder = Rounding.builder(dateTimeUnit); } else { // the interval is a time value? - tzRoundingBuilder = TimeZoneRounding.builder( + tzRoundingBuilder = Rounding.builder( TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval")); } } else { // the interval is an integer time value in millis? 
- tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.timeValueMillis(interval)); + tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval)); } if (timeZone() != null) { tzRoundingBuilder.timeZone(timeZone()); diff --git a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java index a601bd140e8..86e4e3b6cd6 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java @@ -37,7 +37,7 @@ public class OffsetRoundingTests extends ESTestCase { final long interval = 10; final long offset = 7; Rounding.OffsetRounding rounding = new Rounding.OffsetRounding( - new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); + new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); assertEquals(-3, rounding.round(6)); assertEquals(7, rounding.nextRoundingValue(-3)); assertEquals(7, rounding.round(7)); @@ -53,7 +53,7 @@ public class OffsetRoundingTests extends ESTestCase { public void testOffsetRoundingRandom() { for (int i = 0; i < 1000; ++i) { final long interval = randomIntBetween(1, 100); - Rounding internalRounding = new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC); + Rounding internalRounding = new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC); final long offset = randomIntBetween(-100, 100); Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset); long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index d4920e9afe8..41f60556d9d 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -20,8 +20,8 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.rounding.TimeZoneRounding.TimeIntervalRounding; -import org.elasticsearch.common.rounding.TimeZoneRounding.TimeUnitRounding; +import org.elasticsearch.common.rounding.Rounding.TimeIntervalRounding; +import org.elasticsearch.common.rounding.Rounding.TimeUnitRounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Description; @@ -47,29 +47,29 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; public class TimeZoneRoundingTests extends ESTestCase { public void testUTCTimeUnitRounding() { - Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); + Rounding tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); DateTimeZone tz = DateTimeZone.UTC; assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-01T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-01T00:00:00.000Z")), isDate(time("2009-03-01T00:00:00.000Z"), tz)); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); + tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build(); + tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-08T00:00:00.000Z"), tz)); 
assertThat(tzRounding.nextRoundingValue(time("2012-01-08T00:00:00.000Z")), isDate(time("2012-01-15T00:00:00.000Z"), tz)); } public void testUTCIntervalRounding() { - Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).build(); + Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build(); DateTimeZone tz = DateTimeZone.UTC; assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz)); - tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(48)).build(); + tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build(); assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); assertThat(tzRounding.round(time("2009-02-05T13:01:01")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); @@ -77,11 +77,11 @@ public class TimeZoneRoundingTests extends ESTestCase { } /** - * test TimeIntervalTimeZoneRounding, (interval < 12h) with time zone shift + * test TimeIntervalRounding, (interval < 12h) with time zone shift */ - public void testTimeIntervalTimeZoneRounding() { + public void testTimeIntervalRounding() { DateTimeZone tz = DateTimeZone.forOffsetHours(-1); - Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); + Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz)); 
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz)); @@ -90,11 +90,11 @@ public class TimeZoneRoundingTests extends ESTestCase { } /** - * test DayIntervalTimeZoneRounding, (interval >= 12h) with time zone shift + * test DayIntervalRounding, (interval >= 12h) with time zone shift */ - public void testDayIntervalTimeZoneRounding() { + public void testDayIntervalRounding() { DateTimeZone tz = DateTimeZone.forOffsetHours(-8); - Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); + Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); @@ -102,37 +102,37 @@ public class TimeZoneRoundingTests extends ESTestCase { assertThat(tzRounding.nextRoundingValue(time("2009-02-03T08:00:00.000Z")), isDate(time("2009-02-03T20:00:00.000Z"), tz)); } - public void testDayTimeZoneRounding() { + public void testDayRounding() { int timezoneOffset = -2; - Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)) + Rounding tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)) .build(); assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis())); assertThat(tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), equalTo(0L - TimeValue .timeValueHours(timezoneOffset).millis())); DateTimeZone tz = DateTimeZone.forID("-08:00"); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); 
assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), isDate(time("2012-03-31T08:00:00Z"), tz)); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), equalTo(time("2012-03-01T08:00:00Z"))); // date in Feb-3rd, but still in Feb-2nd in -02:00 timezone tz = DateTimeZone.forID("-02:00"); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); // date in Feb-3rd, also in -02:00 timezone - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz)); } - public void testTimeTimeZoneRounding() { + public void testTimeRounding() { // hour unit DateTimeZone tz = DateTimeZone.forOffsetHours(-2); - Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); + Rounding tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); assertThat(tzRounding.round(0), equalTo(0L)); assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis())); @@ -144,23 +144,23 @@ public class TimeZoneRoundingTests extends ESTestCase { Rounding tzRounding; // testing savings to non savings switch DateTimeZone cet = DateTimeZone.forID("CET"); - tzRounding = 
TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); + tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); assertThat(tzRounding.round(time("2014-10-26T01:01:01", cet)), isDate(time("2014-10-26T01:00:00+02:00"), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", cet)),isDate(time("2014-10-26T02:00:00+02:00"), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", cet)), isDate(time("2014-10-26T02:00:00+01:00"), cet)); // testing non savings to savings switch - tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); + tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); assertThat(tzRounding.round(time("2014-03-30T01:01:01", cet)), isDate(time("2014-03-30T01:00:00+01:00"), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", cet)), isDate(time("2014-03-30T03:00:00", cet), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-03-30T03:00:00", cet)), isDate(time("2014-03-30T04:00:00", cet), cet)); // testing non savings to savings switch (America/Chicago) DateTimeZone chg = DateTimeZone.forID("America/Chicago"); - Rounding tzRounding_utc = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); + Rounding tzRounding_utc = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); assertThat(tzRounding.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - Rounding tzRounding_chg = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); + Rounding tzRounding_chg = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); assertThat(tzRounding_chg.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); // testing savings to non savings switch 2013 (America/Chicago) @@ -173,18 +173,21 @@ public class TimeZoneRoundingTests extends ESTestCase { } /** - * 
Randomized test on TimeUnitRounding. - * Test uses random {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) chooses - * test dates that are exactly on or close to offset changes (e.g. DST) in the chosen time zone. + * Randomized test on TimeUnitRounding. Test uses random + * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) + * chooses test dates that are exactly on or close to offset changes (e.g. + * DST) in the chosen time zone. * - * It rounds the test date down and up and performs various checks on the rounding unit interval that is - * defined by this. Assumptions tested are described in {@link #assertInterval(long, long, long, TimeZoneRounding, DateTimeZone)} + * It rounds the test date down and up and performs various checks on the + * rounding unit interval that is defined by this. Assumptions tested are + * described in + * {@link #assertInterval(long, long, long, Rounding, DateTimeZone)} */ - public void testTimeZoneRoundingRandom() { + public void testRoundingRandom() { for (int i = 0; i < 1000; ++i) { DateTimeUnit timeUnit = randomTimeUnit(); DateTimeZone tz = randomDateTimeZone(); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 long unitMillis = timeUnit.field(tz).getDurationField().getUnitMillis(); if (randomBoolean()) { @@ -226,7 +229,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testTimeIntervalCET_DST_End() { long interval = TimeUnit.MINUTES.toMillis(20); DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz); + Rounding rounding = new TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("2015-10-25T01:55:00+02:00")), isDate(time("2015-10-25T01:40:00+02:00"), tz)); 
assertThat(rounding.round(time("2015-10-25T02:15:00+02:00")), isDate(time("2015-10-25T02:00:00+02:00"), tz)); @@ -246,7 +249,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testTimeIntervalCET_DST_Start() { long interval = TimeUnit.MINUTES.toMillis(20); DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz); + Rounding rounding = new TimeIntervalRounding(interval, tz); // test DST start assertThat(rounding.round(time("2016-03-27T01:55:00+01:00")), isDate(time("2016-03-27T01:40:00+01:00"), tz)); assertThat(rounding.round(time("2016-03-27T02:00:00+01:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); @@ -263,7 +266,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testTimeInterval_Kathmandu_DST_Start() { long interval = TimeUnit.MINUTES.toMillis(20); DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz); + Rounding rounding = new TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("1985-12-31T23:55:00+05:30")), isDate(time("1985-12-31T23:40:00+05:30"), tz)); assertThat(rounding.round(time("1986-01-01T00:16:00+05:45")), isDate(time("1986-01-01T00:15:00+05:45"), tz)); assertThat(time("1986-01-01T00:15:00+05:45") - time("1985-12-31T23:40:00+05:30"), equalTo(TimeUnit.MINUTES.toMillis(20))); @@ -281,7 +284,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testIntervalRounding_NotDivisibleInteval() { DateTimeZone tz = DateTimeZone.forID("CET"); long interval = TimeUnit.MINUTES.toMillis(14); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("2016-03-27T01:41:00+01:00")), isDate(time("2016-03-27T01:30:00+01:00"), tz)); assertThat(rounding.round(time("2016-03-27T01:51:00+01:00")), isDate(time("2016-03-27T01:44:00+01:00"), 
tz)); @@ -298,7 +301,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testIntervalRounding_HalfDay_DST() { DateTimeZone tz = DateTimeZone.forID("CET"); long interval = TimeUnit.HOURS.toMillis(12); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("2016-03-26T01:00:00+01:00")), isDate(time("2016-03-26T00:00:00+01:00"), tz)); assertThat(rounding.round(time("2016-03-26T13:00:00+01:00")), isDate(time("2016-03-26T12:00:00+01:00"), tz)); @@ -316,7 +319,7 @@ public class TimeZoneRoundingTests extends ESTestCase { TimeUnit unit = randomFrom(new TimeUnit[] {TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS}); long interval = unit.toMillis(randomIntBetween(1, 365)); DateTimeZone tz = randomDateTimeZone(); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 if (randomBoolean()) { mainDate = nastyDate(mainDate, tz, interval); @@ -356,8 +359,8 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testIntervalRoundingMonotonic_CET() { long interval = TimeUnit.MINUTES.toMillis(45); DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); - List> expectedDates = new ArrayList>(); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); + List> expectedDates = new ArrayList<>(); // first date is the date to be rounded, second the expected result expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00")); expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00")); @@ -387,7 +390,7 @@ public class TimeZoneRoundingTests extends 
ESTestCase { public void testAmbiguousHoursAfterDSTSwitch() { Rounding tzRounding; final DateTimeZone tz = DateTimeZone.forID("Asia/Jerusalem"); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-10-26T00:30:00+03:00")), isDate(time("2014-10-26T00:00:00+03:00"), tz)); assertThat(tzRounding.round(time("2014-10-26T01:30:00+03:00")), isDate(time("2014-10-26T01:00:00+03:00"), tz)); // the utc date for "2014-10-25T03:00:00+03:00" and "2014-10-25T03:00:00+02:00" is the same, local time turns back 1h here @@ -396,7 +399,7 @@ public class TimeZoneRoundingTests extends ESTestCase { assertThat(tzRounding.round(time("2014-10-26T02:30:00+02:00")), isDate(time("2014-10-26T02:00:00+02:00"), tz)); // Day interval - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-11T00:00:00", tz), tz)); // DST on assertThat(tzRounding.round(time("2014-08-11T17:00:00", tz)), isDate(time("2014-08-11T00:00:00", tz), tz)); @@ -406,17 +409,17 @@ public class TimeZoneRoundingTests extends ESTestCase { assertThat(tzRounding.round(time("2015-03-27T17:00:00", tz)), isDate(time("2015-03-27T00:00:00", tz), tz)); // Month interval - tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-01T00:00:00", tz), tz)); // DST on assertThat(tzRounding.round(time("2014-10-10T17:00:00", tz)), isDate(time("2014-10-01T00:00:00", tz), tz)); // Year interval - tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); + tzRounding = 
Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-01-01T00:00:00", tz), tz)); // Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491) - tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(tzRounding.round(time("2014-08-11T17:00:00", tz)), tz)); } @@ -429,8 +432,8 @@ public class TimeZoneRoundingTests extends ESTestCase { DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo"); long start = time("2014-10-18T20:50:00.000", tz); long end = time("2014-10-19T01:00:00.000", tz); - Rounding tzRounding = new TimeZoneRounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); - Rounding dayTzRounding = new TimeZoneRounding.TimeIntervalRounding(60000, tz); + Rounding tzRounding = new Rounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); + Rounding dayTzRounding = new Rounding.TimeIntervalRounding(60000, tz); for (long time = start; time < end; time = time + 60000) { assertThat(tzRounding.nextRoundingValue(time), greaterThan(time)); assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time)); @@ -442,7 +445,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // standard +/-1 hour DST transition, CET DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); // 29 Mar 2015 - Daylight Saving Time Started // at 02:00:00 clocks were turned forward 1 hour to 03:00:00 @@ -466,7 +469,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // which is not a round value for hourly rounding DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; 
DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); assertInterval(time("1985-12-31T22:00:00.000+05:30"), time("1985-12-31T23:00:00.000+05:30"), rounding, 60, tz); assertInterval(time("1985-12-31T23:00:00.000+05:30"), time("1986-01-01T01:00:00.000+05:45"), rounding, 105, tz); @@ -479,7 +482,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // at 02:00:00 clocks were turned backward 0:30 hours to Sunday, 3 March 1991, 01:30:00 DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; DateTimeZone tz = DateTimeZone.forID("Australia/Lord_Howe"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); assertInterval(time("1991-03-03T00:00:00.000+11:00"), time("1991-03-03T01:00:00.000+11:00"), rounding, 60, tz); assertInterval(time("1991-03-03T01:00:00.000+11:00"), time("1991-03-03T02:00:00.000+10:30"), rounding, 90, tz); @@ -499,7 +502,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // at 03:45:00 clocks were turned backward 1 hour to 02:45:00 DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; DateTimeZone tz = DateTimeZone.forID("Pacific/Chatham"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); assertInterval(time("2015-04-05T02:00:00.000+13:45"), time("2015-04-05T03:00:00.000+13:45"), rounding, 60, tz); assertInterval(time("2015-04-05T03:00:00.000+13:45"), time("2015-04-05T03:00:00.000+12:45"), rounding, 60, tz); @@ -514,7 +517,7 @@ public class TimeZoneRoundingTests extends ESTestCase { } } - private static void assertInterval(long rounded, long nextRoundingValue, TimeZoneRounding rounding, int minutes, + private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int 
minutes, DateTimeZone tz) { assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); assertEquals(DateTimeConstants.MILLIS_PER_MINUTE * minutes, nextRoundingValue - rounded); @@ -527,7 +530,7 @@ public class TimeZoneRoundingTests extends ESTestCase { * @param nextRoundingValue the expected upper end of the rounding interval * @param rounding the rounding instance */ - private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, TimeZoneRounding rounding, + private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, Rounding rounding, DateTimeZone tz) { assert rounded <= unrounded && unrounded <= nextRoundingValue; assertThat("rounding should be idempotent ", rounding.round(rounded), isDate(rounded, tz)); From b6ef99195d4ac1d2b503e380ed147f8fef5940e3 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Aug 2016 11:36:26 +0100 Subject: [PATCH 038/103] Remove offset rounding This is in favour of doing the offset calculations in the date histogram --- .../common/rounding/Rounding.java | 73 ------------------- .../histogram/DateHistogramAggregator.java | 13 ++-- .../DateHistogramAggregatorFactory.java | 4 +- .../histogram/InternalDateHistogram.java | 23 +++--- .../common/rounding/OffsetRoundingTests.java | 69 ------------------ .../rounding/TimeZoneRoundingTests.java | 4 - 6 files changed, 24 insertions(+), 162 deletions(-) delete mode 100644 core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java diff --git a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java index 59acf468b86..ad9f926e881 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java @@ -72,8 +72,6 @@ public abstract class Rounding implements Streamable { private DateTimeZone timeZone = 
DateTimeZone.UTC; - private long offset; - public Builder(DateTimeUnit unit) { this.unit = unit; this.interval = -1; @@ -94,11 +92,6 @@ public abstract class Rounding implements Streamable { return this; } - public Builder offset(long offset) { - this.offset = offset; - return this; - } - public Rounding build() { Rounding timeZoneRounding; if (unit != null) { @@ -106,9 +99,6 @@ public abstract class Rounding implements Streamable { } else { timeZoneRounding = new TimeIntervalRounding(interval, timeZone); } - if (offset != 0) { - timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); - } return timeZoneRounding; } } @@ -330,68 +320,6 @@ public abstract class Rounding implements Streamable { } } - public static class OffsetRounding extends Rounding { - - static final byte ID = 8; - - private Rounding rounding; - - private long offset; - - OffsetRounding() { // for serialization - } - - public OffsetRounding(Rounding intervalRounding, long offset) { - this.rounding = intervalRounding; - this.offset = offset; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long value) { - return rounding.round(value - offset) + offset; - } - - @Override - public long nextRoundingValue(long value) { - return rounding.nextRoundingValue(value - offset) + offset; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); - offset = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); - out.writeLong(offset); - } - - @Override - public int hashCode() { - return Objects.hash(rounding, offset); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - OffsetRounding other = (OffsetRounding) obj; - return Objects.equals(rounding, other.rounding) - && Objects.equals(offset, other.offset); - } - } - public 
static class Streams { public static void write(Rounding rounding, StreamOutput out) throws IOException { @@ -405,7 +333,6 @@ public abstract class Rounding implements Streamable { switch (id) { case TimeUnitRounding.ID: rounding = new TimeUnitRounding(); break; case TimeIntervalRounding.ID: rounding = new TimeIntervalRounding(); break; - case OffsetRounding.ID: rounding = new OffsetRounding(); break; default: throw new ElasticsearchException("unknown rounding id [" + id + "]"); } rounding.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8785d53e01e..0ea2fba719b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -45,7 +45,7 @@ import java.util.Map; /** * An aggregator for date values. Every date is rounded down using a configured * {@link Rounding}. 
- * + * * @see Rounding */ class DateHistogramAggregator extends BucketsAggregator { @@ -60,14 +60,17 @@ class DateHistogramAggregator extends BucketsAggregator { private final ExtendedBounds extendedBounds; private final LongHash bucketOrds; + private long offset; - public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed, + public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, InternalOrder order, + boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, AggregationContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; + this.offset = offset; this.order = order; this.keyed = keyed; this.minDocCount = minDocCount; @@ -100,7 +103,7 @@ class DateHistogramAggregator extends BucketsAggregator { long previousRounded = Long.MIN_VALUE; for (int i = 0; i < valuesCount; ++i) { long value = values.valueAt(i); - long rounded = rounding.round(value); + long rounded = rounding.round(value - offset) + offset; assert rounded >= previousRounded; if (rounded == previousRounded) { continue; @@ -133,7 +136,7 @@ class DateHistogramAggregator extends BucketsAggregator { InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? 
new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, + return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData()); } @@ -142,7 +145,7 @@ class DateHistogramAggregator extends BucketsAggregator { InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, + return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 2743989a4b5..79f81e28374 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -111,7 +111,7 @@ public final class DateHistogramAggregatorFactory if (timeZone() != null) { tzRoundingBuilder.timeZone(timeZone()); } - Rounding rounding = tzRoundingBuilder.offset(offset).build(); + Rounding rounding = tzRoundingBuilder.build(); return rounding; } @@ -137,7 +137,7 @@ public final class DateHistogramAggregatorFactory // parse any string bounds to longs and round them roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding); } - return new DateHistogramAggregator(name, factories, rounding, order, keyed, 
minDocCount, roundedBounds, valuesSource, + return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, roundedBounds, valuesSource, config.format(), context, parent, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4d46c2c1850..56d3792e0c6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -178,14 +178,17 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< private final DocValueFormat format; private final boolean keyed; private final long minDocCount; + private final long offset; private final EmptyBucketInfo emptyBucketInfo; - InternalDateHistogram(String name, List buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, + InternalDateHistogram(String name, List buckets, InternalOrder order, long minDocCount, long offset, + EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.buckets = buckets; this.order = order; + this.offset = offset; assert (minDocCount == 0) == (emptyBucketInfo != null); this.minDocCount = minDocCount; this.emptyBucketInfo = emptyBucketInfo; @@ -205,6 +208,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< } else { emptyBucketInfo = null; } + offset = in.readLong(); format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); buckets = in.readList(stream -> new Bucket(stream, keyed, format)); @@ -217,6 +221,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< if (minDocCount == 
0) { emptyBucketInfo.writeTo(out); } + out.writeLong(offset); out.writeNamedWriteable(format); out.writeBoolean(keyed); out.writeList(buckets); @@ -234,7 +239,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override public InternalDateHistogram create(List buckets) { - return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, + return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators(), metaData); } @@ -328,7 +333,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< long max = bounds.getMax(); while (key <= max) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); - key = emptyBucketInfo.rounding.nextRoundingValue(key); + key = nextKey(key).longValue(); } } } else { @@ -337,7 +342,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< if (key < firstBucket.key) { while (key < firstBucket.key) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); - key = emptyBucketInfo.rounding.nextRoundingValue(key); + key = nextKey(key).longValue(); } } } @@ -349,10 +354,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< while (iter.hasNext()) { Bucket nextBucket = list.get(iter.nextIndex()); if (lastBucket != null) { - long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); + long key = nextKey(lastBucket.key).longValue(); while (key < nextBucket.key) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); - key = emptyBucketInfo.rounding.nextRoundingValue(key); + key = nextKey(key).longValue(); } assert key == nextBucket.key; } @@ -393,7 +398,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< CollectionUtil.introSort(reducedBuckets, order.comparator()); } - return new 
InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, + return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators(), getMetaData()); } @@ -424,7 +429,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override public Number nextKey(Number key) { - return emptyBucketInfo.rounding.nextRoundingValue(key.longValue()); + return emptyBucketInfo.rounding.nextRoundingValue(key.longValue() - offset) + offset; } @Override @@ -435,7 +440,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< buckets2.add((Bucket) b); } buckets2 = Collections.unmodifiableList(buckets2); - return new InternalDateHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format, + return new InternalDateHistogram(name, buckets2, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators(), getMetaData()); } diff --git a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java deleted file mode 100644 index 86e4e3b6cd6..00000000000 --- a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.rounding; - -import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - -public class OffsetRoundingTests extends ESTestCase { - - /** - * Simple test case to illustrate how Rounding.Offset works on readable input. - * offset shifts input value back before rounding (so here 6 - 7 -> -1) - * then shifts rounded Value back (here -10 -> -3) - */ - public void testOffsetRounding() { - final long interval = 10; - final long offset = 7; - Rounding.OffsetRounding rounding = new Rounding.OffsetRounding( - new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); - assertEquals(-3, rounding.round(6)); - assertEquals(7, rounding.nextRoundingValue(-3)); - assertEquals(7, rounding.round(7)); - assertEquals(17, rounding.nextRoundingValue(7)); - assertEquals(7, rounding.round(16)); - assertEquals(17, rounding.round(17)); - assertEquals(27, rounding.nextRoundingValue(17)); - } - - /** - * test OffsetRounding with an internal interval rounding on random inputs - */ - public void testOffsetRoundingRandom() { - for (int i = 0; i < 1000; ++i) { - final long interval = randomIntBetween(1, 100); - Rounding internalRounding = new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC); - final long offset = randomIntBetween(-100, 100); - Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset); - long 
safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow - long value = Math.max(randomLong() - safetyMargin, Long.MIN_VALUE + safetyMargin); - final long r_value = rounding.round(value); - final long nextRoundingValue = rounding.nextRoundingValue(r_value); - assertThat("Rounding should be idempotent", r_value, equalTo(rounding.round(r_value))); - assertThat("Rounded value smaller than unrounded, regardless of offset", r_value - offset, lessThanOrEqualTo(value - offset)); - assertThat("Rounded value <= value < next interval start", r_value + interval, greaterThan(value)); - assertThat("NextRounding value should be interval from rounded value", r_value + interval, equalTo(nextRoundingValue)); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index 41f60556d9d..ff83ddfa57d 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -55,10 +55,6 @@ public class TimeZoneRoundingTests extends ESTestCase { tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-08T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-08T00:00:00.000Z")), isDate(time("2012-01-15T00:00:00.000Z"), tz)); } public void testUTCIntervalRounding() { From f273981f37ed02158de818f510c7826e13cfe592 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 
4 Aug 2016 16:28:12 +0100 Subject: [PATCH 039/103] Added failure message to test --- .../transport/client/PreBuiltTransportClientTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index c519b29f9bc..fcc6f33ac6e 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -56,7 +56,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest { new PreBuiltTransportClient(Settings.EMPTY, plugin); fail("exception expected"); } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().startsWith("plugin already exists: ")); + assertTrue("Expected message to start with [plugin already exists: ] but was instead [" + ex.getMessage() + "]", + ex.getMessage().startsWith("plugin already exists: ")); } } } From f1110f6f2a831ce06609a83189e2896bb03813cb Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Aug 2016 16:45:05 +0100 Subject: [PATCH 040/103] fix import statements --- .../transport/client/PreBuiltTransportClientTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index fcc6f33ac6e..0b7c3380b94 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -32,8 +32,8 @@ import org.junit.Test; import java.util.Arrays; -import static org.junit.Assert.*; import static 
org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; public class PreBuiltTransportClientTests extends RandomizedTest { From cd9388ce660a697bd499f4fb0d8377821656f429 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 4 Aug 2016 19:46:45 +0200 Subject: [PATCH 041/103] [TEST] parse query alternate versions in strict mode AbstractQueryTestCase parses the main version of the query in strict mode, meaning that it will fail if any deprecated syntax is used. It should do the same for alternate versions (e.g. short versions). This is the way it is because the two alternate versions for ids query are both deprecated. Moved testing for those to a specific test method that isolates the deprecations and actually tests that the two are deprecated. --- .../index/query/IdsQueryBuilderTests.java | 76 +++++++++++-------- .../test/AbstractQueryTestCase.java | 2 +- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index 42f4aaf56aa..4ad90edc8cb 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -23,13 +23,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; @@ -94,37 +93,6 @@ public class IdsQueryBuilderTests extends 
AbstractQueryTestCase } } - @Override - protected Map getAlternateVersions() { - Map alternateVersions = new HashMap<>(); - - IdsQueryBuilder tempQuery = createTestQueryBuilder(); - if (tempQuery.types() != null && tempQuery.types().length > 0) { - String type = tempQuery.types()[0]; - IdsQueryBuilder testQuery = new IdsQueryBuilder(type); - - //single value type can also be called _type - String contentString1 = "{\n" + - " \"ids\" : {\n" + - " \"_type\" : \"" + type + "\",\n" + - " \"values\" : []\n" + - " }\n" + - "}"; - alternateVersions.put(contentString1, testQuery); - - //array of types can also be called type rather than types - String contentString2 = "{\n" + - " \"ids\" : {\n" + - " \"type\" : [\"" + type + "\"],\n" + - " \"values\" : []\n" + - " }\n" + - "}"; - alternateVersions.put(contentString2, testQuery); - } - - return alternateVersions; - } - public void testIllegalArguments() { try { new IdsQueryBuilder((String[])null); @@ -166,4 +134,46 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase assertEquals(json, 3, parsed.ids().size()); assertEquals(json, "my_type", parsed.types()[0]); } + + public void testFromJsonDeprecatedSyntax() throws IOException { + IdsQueryBuilder tempQuery = createTestQueryBuilder(); + assumeTrue("test requires at least one type", tempQuery.types() != null && tempQuery.types().length > 0); + + String type = tempQuery.types()[0]; + IdsQueryBuilder testQuery = new IdsQueryBuilder(type); + + //single value type can also be called _type + String contentString = "{\n" + + " \"ids\" : {\n" + + " \"_type\" : \"" + type + "\",\n" + + " \"values\" : []\n" + + " }\n" + + "}"; + + IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY); + assertEquals(testQuery, parsed); + + try { + parseQuery(contentString); + fail("parse should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage()); + } + + 
//array of types can also be called type rather than types + contentString = "{\n" + + " \"ids\" : {\n" + + " \"types\" : [\"" + type + "\"],\n" + + " \"values\" : []\n" + + " }\n" + + "}"; + parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY); + assertEquals(testQuery, parsed); + try { + parseQuery(contentString); + fail("parse should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage()); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 92acd702e3a..f59afcb40d8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -245,7 +245,7 @@ public abstract class AbstractQueryTestCase> assertParsedQuery(shuffled.bytes(), testQuery); for (Map.Entry alternateVersion : getAlternateVersions().entrySet()) { String queryAsString = alternateVersion.getKey(); - assertParsedQuery(new BytesArray(queryAsString), alternateVersion.getValue(), ParseFieldMatcher.EMPTY); + assertParsedQuery(new BytesArray(queryAsString), alternateVersion.getValue()); } } } From 70100821123f53653417f7752f26d442b4fdafb3 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 4 Aug 2016 20:42:12 +0200 Subject: [PATCH 042/103] Add checksumming and versions to the Translog's Checkpoint files (#19797) This prepares the infrastructure to be able to extend the checkpoint file to store more information. 
--- .../index/translog/Checkpoint.java | 115 +++++++++++++----- .../index/translog/Translog.java | 4 +- .../translog/TruncateTranslogCommand.java | 13 +- 3 files changed, 91 insertions(+), 41 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index 0fd59090944..d4eb9807827 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -18,16 +18,19 @@ */ package org.elasticsearch.index.translog; -import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; -import org.apache.lucene.store.InputStreamDataInput; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.OutputStreamIndexOutput; +import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.common.io.Channels; +import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.InputStream; import java.nio.channels.FileChannel; -import java.nio.file.Files; import java.nio.file.OpenOption; import java.nio.file.Path; @@ -35,69 +38,117 @@ import java.nio.file.Path; */ class Checkpoint { - static final int BUFFER_SIZE = Integer.BYTES // ops - + Long.BYTES // offset - + Long.BYTES;// generation final long offset; final int numOps; final long generation; + private static final int INITIAL_VERSION = 1; // start with 1, just to recognize there was some magic serialization logic before + + private static final String CHECKPOINT_CODEC = "ckp"; + + static final int FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC) + + Integer.BYTES // ops + + Long.BYTES // offset + + Long.BYTES // generation + + CodecUtil.footerLength(); + + static final int 
LEGACY_NON_CHECKSUMMED_FILE_LENGTH = Integer.BYTES // ops + + Long.BYTES // offset + + Long.BYTES; // generation + Checkpoint(long offset, int numOps, long generation) { this.offset = offset; this.numOps = numOps; this.generation = generation; } - Checkpoint(DataInput in) throws IOException { - offset = in.readLong(); - numOps = in.readInt(); - generation = in.readLong(); - } - - private void write(FileChannel channel) throws IOException { - byte[] buffer = new byte[BUFFER_SIZE]; - final ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); - write(out); - Channels.writeToChannel(buffer, channel); - } - - void write(DataOutput out) throws IOException { + private void write(DataOutput out) throws IOException { out.writeLong(offset); out.writeInt(numOps); out.writeLong(generation); } + // reads a checksummed checkpoint introduced in ES 5.0.0 + static Checkpoint readChecksummedV1(DataInput in) throws IOException { + return new Checkpoint(in.readLong(), in.readInt(), in.readLong()); + } + + // reads checkpoint from ES < 5.0.0 + static Checkpoint readNonChecksummed(DataInput in) throws IOException { + return new Checkpoint(in.readLong(), in.readInt(), in.readLong()); + } + @Override public String toString() { return "Checkpoint{" + - "offset=" + offset + - ", numOps=" + numOps + - ", translogFileGeneration= " + generation + - '}'; + "offset=" + offset + + ", numOps=" + numOps + + ", translogFileGeneration= " + generation + + '}'; } public static Checkpoint read(Path path) throws IOException { - try (InputStream in = Files.newInputStream(path)) { - return new Checkpoint(new InputStreamDataInput(in)); + try (Directory dir = new SimpleFSDirectory(path.getParent())) { + try (final IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) { + if (indexInput.length() == LEGACY_NON_CHECKSUMMED_FILE_LENGTH) { + // OLD unchecksummed file that was written < ES 5.0.0 + return Checkpoint.readNonChecksummed(indexInput); + } + // We checksum the 
entire file before we even go and parse it. If it's corrupted we barf right here. + CodecUtil.checksumEntireFile(indexInput); + final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, INITIAL_VERSION, INITIAL_VERSION); + return Checkpoint.readChecksummedV1(indexInput); + } } } public static void write(ChannelFactory factory, Path checkpointFile, Checkpoint checkpoint, OpenOption... options) throws IOException { + final ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream(FILE_SIZE) { + @Override + public synchronized byte[] toByteArray() { + // don't clone + return buf; + } + }; + final String resourceDesc = "checkpoint(path=\"" + checkpointFile + "\", gen=" + checkpoint + ")"; + try (final OutputStreamIndexOutput indexOutput = + new OutputStreamIndexOutput(resourceDesc, checkpointFile.toString(), byteOutputStream, FILE_SIZE)) { + CodecUtil.writeHeader(indexOutput, CHECKPOINT_CODEC, INITIAL_VERSION); + checkpoint.write(indexOutput); + CodecUtil.writeFooter(indexOutput); + + assert indexOutput.getFilePointer() == FILE_SIZE : + "get you number straights. Bytes written: " + indexOutput.getFilePointer() + " buffer size: " + FILE_SIZE; + assert indexOutput.getFilePointer() < 512 : + "checkpoint files have to be smaller 512b for atomic writes. size: " + indexOutput.getFilePointer(); + + } + // now go and write to the channel, in one go. 
try (FileChannel channel = factory.open(checkpointFile, options)) { - checkpoint.write(channel); + Channels.writeToChannel(byteOutputStream.toByteArray(), channel); + // no need to force metadata, file size stays the same and we did the full fsync + // when we first created the file, so the directory entry doesn't change as well channel.force(false); } } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } Checkpoint that = (Checkpoint) o; - if (offset != that.offset) return false; - if (numOps != that.numOps) return false; + if (offset != that.offset) { + return false; + } + if (numOps != that.numOps) { + return false; + } return generation == that.generation; } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index eb22c84cf0e..b6ace07a55d 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -200,7 +200,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC Files.createDirectories(location); final long generation = 1; Checkpoint checkpoint = new Checkpoint(0, 0, generation); - Checkpoint.write(getChannelFactory(), location.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); + Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + IOUtils.fsync(checkpointFile, false); current = createWriter(generation); this.lastCommittedTranslogFileGeneration = NOT_SET_GENERATION; diff --git a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java 
b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index b6b91f14ba8..6514cd42709 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -36,11 +36,9 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.SettingCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.translog.Checkpoint; import java.io.IOException; import java.nio.channels.Channels; @@ -168,12 +166,11 @@ public class TruncateTranslogCommand extends SettingCommand { /** Write a checkpoint file to the given location with the given generation */ public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException { - try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); - OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) { - Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration); - emptyCheckpoint.write(out); - fc.force(true); - } + Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration); + Checkpoint.write(FileChannel::open, filename, emptyCheckpoint, + StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); + // fsync with metadata here to make sure. 
+ IOUtils.fsync(filename, false); } /** From 4598c36027e883a8ace95a357d9863f96340d00d Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 4 Aug 2016 21:00:59 +0200 Subject: [PATCH 043/103] Fix various concurrency issues in transport (#19675) Due to various issues (most notably a missing happens-before edge between socket accept and channel close in MockTcpTransport), MockTcpTransportTests sometimes did not terminate. With this commit we fix various concurrency issues that led to this hanging test. Failing example build: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-os-compatibility/os=oraclelinux/835/console --- .../elasticsearch/transport/TcpTransport.java | 25 +++---- .../AbstractSimpleTransportTestCase.java | 11 ++- .../transport/MockTcpTransport.java | 69 ++++++++++++++----- 3 files changed, 64 insertions(+), 41 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 7145777aad7..d567e838138 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -93,7 +93,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -768,12 +767,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i threadPool.generic().execute(() -> { globalLock.writeLock().lock(); try { - for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { - NodeChannels nodeChannels = it.next(); - it.remove(); - IOUtils.closeWhileHandlingException(nodeChannels); - } - + // first stop to accept any incoming connections so nobody can connect to this transport for (Map.Entry> 
entry : serverChannels.entrySet()) { try { closeChannels(entry.getValue()); @@ -781,16 +775,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i logger.debug("Error closing serverChannel for profile [{}]", e, entry.getKey()); } } - try { - stopInternal(); - } finally { - for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { - NodeChannels nodeChannels = it.next(); - it.remove(); - IOUtils.closeWhileHandlingException(nodeChannels); - } - } + for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { + NodeChannels nodeChannels = it.next(); + it.remove(); + IOUtils.closeWhileHandlingException(nodeChannels); + } + stopInternal(); } finally { globalLock.writeLock().unlock(); latch.countDown(); @@ -800,7 +791,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i try { latch.await(30, TimeUnit.SECONDS); } catch (InterruptedException e) { - Thread.interrupted(); + Thread.currentThread().interrupt(); // ignore } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index eb4dbb8bca5..33c5fcccad1 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -69,12 +69,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected ThreadPool threadPool; protected static final Version version0 = Version.CURRENT.minimumCompatibilityVersion(); - protected DiscoveryNode nodeA; - protected MockTransportService serviceA; + protected volatile DiscoveryNode nodeA; + protected volatile MockTransportService serviceA; protected static final Version version1 = Version.fromId(Version.CURRENT.id + 1); - protected DiscoveryNode nodeB; - protected MockTransportService serviceB; + protected 
volatile DiscoveryNode nodeB; + protected volatile MockTransportService serviceB; protected abstract MockTransportService build(Settings settings, Version version); @@ -489,9 +489,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true)); } - @TestLogging("transport:DEBUG,transport.tracer:TRACE") - // boaz is on this - @AwaitsFix(bugUrl = "https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-os-compatibility/os=oraclelinux/835") public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException { Set sendingErrors = ConcurrentCollections.newConcurrentSet(); Set responseErrors = ConcurrentCollections.newConcurrentSet(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index c128ee49022..da7fcc53410 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -122,7 +122,7 @@ public class MockTcpTransport extends TcpTransport try { started.await(); } catch (InterruptedException e) { - Thread.interrupted(); + Thread.currentThread().interrupt(); } return serverMockChannel; } @@ -261,6 +261,14 @@ public class MockTcpTransport extends TcpTransport private final CancellableThreads cancellableThreads = new CancellableThreads(); private final Closeable onClose; + /** + * Constructs a new MockChannel instance intended for handling the actual incoming / outgoing traffic. + * + * @param socket The client socket. Mut not be null. + * @param localAddress Address associated with the corresponding local server socket. Must not be null. + * @param profile The associated profile name. + * @param onClose Callback to execute when this channel is closed. 
+ */ public MockChannel(Socket socket, InetSocketAddress localAddress, String profile, Consumer onClose) { this.localAddress = localAddress; this.activeChannel = socket; @@ -268,13 +276,44 @@ public class MockTcpTransport extends TcpTransport this.profile = profile; this.onClose = () -> onClose.accept(this); } + + /** + * Constructs a new MockChannel instance intended for accepting requests. + * + * @param serverSocket The associated server socket. Must not be null. + * @param profile The associated profile name. + */ + public MockChannel(ServerSocket serverSocket, String profile) { + this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress(); + this.serverSocket = serverSocket; + this.profile = profile; + this.activeChannel = null; + this.onClose = null; + } + public void accept(Executor executor) throws IOException { while (isOpen.get()) { - Socket accept = serverSocket.accept(); - configureSocket(accept); - MockChannel mockChannel = new MockChannel(accept, localAddress, profile, workerChannels::remove); - workerChannels.put(mockChannel, Boolean.TRUE); - mockChannel.loopRead(executor); + Socket incomingSocket = serverSocket.accept(); + MockChannel incomingChannel = null; + try { + configureSocket(incomingSocket); + incomingChannel = new MockChannel(incomingSocket, localAddress, profile, workerChannels::remove); + //establish a happens-before edge between closing and accepting a new connection + synchronized (this) { + if (isOpen.get()) { + workerChannels.put(incomingChannel, Boolean.TRUE); + // this spawns a new thread immediately, so OK under lock + incomingChannel.loopRead(executor); + // the channel is properly registered and will be cleared by the close code. + incomingSocket = null; + incomingChannel = null; + } + } + } finally { + // ensure we don't leak sockets and channels in the failure case. Note that we null both + // if there are no exceptions so this becomes a no op. 
+ IOUtils.closeWhileHandlingException(incomingSocket, incomingChannel); + } } } @@ -294,26 +333,22 @@ public class MockTcpTransport extends TcpTransport @Override protected void doRun() throws Exception { StreamInput input = new InputStreamStreamInput(new BufferedInputStream(activeChannel.getInputStream())); - while (isOpen.get()) { + // There is a (slim) chance that we get interrupted right after a loop iteration, so check explicitly + while (isOpen.get() && !Thread.currentThread().isInterrupted()) { cancellableThreads.executeIO(() -> readMessage(MockChannel.this, input)); } } }); } - public MockChannel(ServerSocket serverSocket, String profile) { - this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress(); - this.serverSocket = serverSocket; - this.profile = profile; - this.activeChannel = null; - this.onClose = null; - } - @Override public void close() throws IOException { if (isOpen.compareAndSet(true, false)) { - IOUtils.close( () -> cancellableThreads.cancel("channel closed"), serverSocket, activeChannel, - () -> IOUtils.close(workerChannels.keySet()), onClose); + //establish a happens-before edge between closing and accepting a new connection + synchronized (this) { + IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels.keySet()), + () -> cancellableThreads.cancel("channel closed"), onClose); + } } } } From 1e587406d8593f5443948d3ee464a4c470d550df Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 2 Aug 2016 17:35:31 -0400 Subject: [PATCH 044/103] Fail yaml tests and docs snippets that get unexpected warnings Adds `warnings` syntax to the yaml test that allows you to expect a `Warning` header that looks like: ``` - do: warnings: - '[index] is deprecated' - quotes are not required because yaml - but this argument is always a list, never a single string - no matter how many warnings you expect get: index: test type: test id: 1 ``` These are accessible from the docs with: ``` // TEST[warning:some warning] ``` This 
should help to force you to update the docs if you deprecate something. You *must* add the warnings marker to the docs or the build will fail. While you are there you *should* update the docs to add deprecation warnings visible in the rendered results. --- .../doc/RestTestsFromSnippetsTask.groovy | 18 ++++- .../gradle/doc/SnippetsTask.groovy | 11 +++- docs/README.asciidoc | 4 ++ docs/plugins/mapper-attachments.asciidoc | 14 ++-- docs/reference/indices/analyze.asciidoc | 2 +- .../reference/mapping/params/lat-lon.asciidoc | 21 ++++-- .../query-dsl/function-score-query.asciidoc | 12 ++-- .../query-dsl/indices-query.asciidoc | 5 +- docs/reference/query-dsl/mlt-query.asciidoc | 4 +- .../query-dsl/parent-id-query.asciidoc | 2 +- .../query-dsl/percolate-query.asciidoc | 2 +- .../reference/query-dsl/prefix-query.asciidoc | 3 +- .../query-dsl/template-query.asciidoc | 8 ++- .../search/request/highlighting.asciidoc | 10 +-- .../search/request/source-filtering.asciidoc | 7 +- .../test/lang_mustache/40_template_query.yaml | 22 ++++++- .../rest-api-spec/test/README.asciidoc | 20 +++++- .../rest-api-spec/test/mlt/20_docs.yaml | 6 +- .../test/search/10_source_filtering.yaml | 2 +- .../rest/yaml/ClientYamlTestResponse.java | 16 +++++ .../test/rest/yaml/Features.java | 1 + .../ClientYamlTestSuiteParseContext.java | 9 ++- .../rest/yaml/parser/DoSectionParser.java | 18 +++++ .../test/rest/yaml/section/DoSection.java | 63 +++++++++++++++++- .../yaml/parser/DoSectionParserTests.java | 58 +++++++++++++--- .../rest/yaml/section/DoSectionTests.java | 66 +++++++++++++++++++ 26 files changed, 347 insertions(+), 57 deletions(-) create mode 100644 test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index adea61cd4f3..100715586d3 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -119,6 +119,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { current.println(" reason: $test.skipTest") } if (test.setup != null) { + // Insert a setup defined outside of the docs String setup = setups[test.setup] if (setup == null) { throw new InvalidUserDataException("Couldn't find setup " @@ -136,13 +137,23 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { response.contents.eachLine { current.println(" $it") } } - void emitDo(String method, String pathAndQuery, - String body, String catchPart, boolean inSetup) { + void emitDo(String method, String pathAndQuery, String body, + String catchPart, List warnings, boolean inSetup) { def (String path, String query) = pathAndQuery.tokenize('?') current.println(" - do:") if (catchPart != null) { current.println(" catch: $catchPart") } + if (false == warnings.isEmpty()) { + current.println(" warnings:") + for (String warning in warnings) { + // Escape " because we're going to quote the warning + String escaped = warning.replaceAll('"', '\\\\"') + /* Quote the warning in case it starts with [ which makes + * it look too much like an array. 
*/ + current.println(" - \"$escaped\"") + } + } current.println(" raw:") current.println(" method: $method") current.println(" path: \"$path\"") @@ -200,7 +211,8 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { // Leading '/'s break the generated paths pathAndQuery = pathAndQuery.substring(1) } - emitDo(method, pathAndQuery, body, catchPart, inSetup) + emitDo(method, pathAndQuery, body, catchPart, snippet.warnings, + inSetup) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index afd91858e9d..749c0f916f8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -37,8 +37,9 @@ public class SnippetsTask extends DefaultTask { private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/ private static final String SKIP = /skip:([^\]]+)/ private static final String SETUP = /setup:([^ \]]+)/ + private static final String WARNING = /warning:(.+)/ private static final String TEST_SYNTAX = - /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/ + /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/ /** * Action to take on each snippet. 
Called with a single parameter, an @@ -158,6 +159,10 @@ public class SnippetsTask extends DefaultTask { snippet.setup = it.group(6) return } + if (it.group(7) != null) { + snippet.warnings.add(it.group(7)) + return + } throw new InvalidUserDataException( "Invalid test marker: $line") } @@ -230,6 +235,7 @@ public class SnippetsTask extends DefaultTask { String language = null String catchPart = null String setup = null + List warnings = new ArrayList() @Override public String toString() { @@ -254,6 +260,9 @@ public class SnippetsTask extends DefaultTask { if (setup) { result += "[setup:$setup]" } + for (String warning in warnings) { + result += "[warning:$warning]" + } } if (testResponse) { result += '// TESTRESPONSE' diff --git a/docs/README.asciidoc b/docs/README.asciidoc index da07009d13d..5da211c6622 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -28,6 +28,10 @@ are tests even if they don't have `// CONSOLE`. * `// TEST[setup:name]`: Run some setup code before running the snippet. This is useful for creating and populating indexes used in the snippet. The setup code is defined in `docs/build.gradle`. + * `// TEST[warning:some warning]`: Expect the response to include a `Warning` + header. If the response doesn't include a `Warning` header with the exact + text then the test fails. If the response includes `Warning` headers that + aren't expected then the test fails. * `// TESTRESPONSE`: Matches this snippet against the body of the response of the last test. If the response is JSON then order is ignored. 
With `// TEST[continued]` you can make tests that contain multiple command snippets diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc index f2c034a317e..119ec10c905 100644 --- a/docs/plugins/mapper-attachments.asciidoc +++ b/docs/plugins/mapper-attachments.asciidoc @@ -196,14 +196,14 @@ PUT /test "file" : { "type" : "attachment", "fields" : { - "content" : {"index" : "no"}, - "title" : {"store" : "yes"}, - "date" : {"store" : "yes"}, + "content" : {"index" : true}, + "title" : {"store" : true}, + "date" : {"store" : true}, "author" : {"analyzer" : "my_analyzer"}, - "keywords" : {"store" : "yes"}, - "content_type" : {"store" : "yes"}, - "content_length" : {"store" : "yes"}, - "language" : {"store" : "yes"} + "keywords" : {"store" : true}, + "content_type" : {"store" : true}, + "content_length" : {"store" : true}, + "language" : {"store" : true} } } } diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index 5f75da11176..e5ed67bf12f 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -127,7 +127,7 @@ experimental[The format of the additional detail information is experimental and GET _analyze { "tokenizer" : "standard", - "token_filter" : ["snowball"], + "filter" : ["snowball"], "text" : "detailed output", "explain" : true, "attributes" : ["keyword"] <1> diff --git a/docs/reference/mapping/params/lat-lon.asciidoc b/docs/reference/mapping/params/lat-lon.asciidoc index c610d8a1771..88c91c30d06 100644 --- a/docs/reference/mapping/params/lat-lon.asciidoc +++ b/docs/reference/mapping/params/lat-lon.asciidoc @@ -1,6 +1,9 @@ [[lat-lon]] === `lat_lon` +deprecated[5.0.0, ????????] +// https://github.com/elastic/elasticsearch/issues/19792 + <> are usually performed by plugging the value of each <> field into a formula to determine whether it falls into the required area or not. 
Unlike most queries, the inverted index @@ -10,7 +13,7 @@ Setting `lat_lon` to `true` causes the latitude and longitude values to be indexed as numeric fields (called `.lat` and `.lon`). These fields can be used by the <> and <> queries instead of -performing in-memory calculations. +performing in-memory calculations. So this mapping: [source,js] -------------------------------------------------- @@ -27,8 +30,15 @@ PUT my_index } } } +-------------------------------------------------- +// TEST[warning:geo_point lat_lon parameter is deprecated and will be removed in the next major release] +<1> Setting `lat_lon` to true indexes the geo-point in the `location.lat` and `location.lon` fields. -PUT my_index/my_type/1 +Allows these actions: + +[source,js] +-------------------------------------------------- +PUT my_index/my_type/1?refresh { "location": { "lat": 41.12, @@ -46,18 +56,17 @@ GET my_index/_search "lon": -71 }, "distance": "50km", - "optimize_bbox": "indexed" <2> + "optimize_bbox": "indexed" <1> } } } -------------------------------------------------- // CONSOLE -<1> Setting `lat_lon` to true indexes the geo-point in the `location.lat` and `location.lon` fields. -<2> The `indexed` option tells the geo-distance query to use the inverted index instead of the in-memory calculation. +// TEST[continued] +<1> The `indexed` option tells the geo-distance query to use the inverted index instead of the in-memory calculation. Whether the in-memory or indexed operation performs better depends both on your dataset and on the types of queries that you are running. NOTE: The `lat_lon` option only makes sense for single-value `geo_point` fields. It will not work with arrays of geo-points. 
- diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index b1b6b56c2b2..0b6214396c9 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -18,7 +18,7 @@ GET /_search { "query": { "function_score": { - "query": {}, + "query": { "match_all": {} }, "boost": "5", "random_score": {}, <1> "boost_mode":"multiply" @@ -40,17 +40,17 @@ GET /_search { "query": { "function_score": { - "query": {}, + "query": { "match_all": {} }, "boost": "5", <1> "functions": [ { - "filter": {}, + "filter": { "match": { "test": "bar" } }, "random_score": {}, <2> "weight": 23 }, { - "filter": {}, - "weight": 42 + "filter": { "match": { "test": "cat" } }, + "weight": 42 } ], "max_boost": 42, @@ -170,7 +170,7 @@ you wish to inhibit this, set `"boost_mode": "replace"` The `weight` score allows you to multiply the score by the provided `weight`. This can sometimes be desired since boost value set on specific queries gets normalized, while for this score function it does -not. The number value is of type float. +not. The number value is of type float. [source,js] -------------------------------------------------- diff --git a/docs/reference/query-dsl/indices-query.asciidoc b/docs/reference/query-dsl/indices-query.asciidoc index 8f2f958086e..112a779e3f9 100644 --- a/docs/reference/query-dsl/indices-query.asciidoc +++ b/docs/reference/query-dsl/indices-query.asciidoc @@ -1,6 +1,8 @@ [[query-dsl-indices-query]] === Indices Query +deprecated[5.0.0, Search on the '_index' field instead] + The `indices` query is useful in cases where a search is executed across multiple indices. It allows to specify a list of index names and an inner query that is only executed for indices matching names on that list. @@ -20,7 +22,8 @@ GET /_search } } -------------------------------------------------- -// CONSOLE +// CONSOLE +// TEST[warning:indices query is deprecated. 
Instead search on the '_index' field] You can use the `index` field to provide a single index. diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index b132b49f234..8e23afdbb86 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -6,7 +6,7 @@ set of documents. In order to do so, MLT selects a set of representative terms of these input documents, forms a query using these terms, executes the query and returns the results. The user controls the input documents, how the terms should be selected and how the query is formed. `more_like_this` can be -shortened to `mlt` deprecated[5.0.0,use `more_like_this` instead). +shortened to `mlt` deprecated[5.0.0,use `more_like_this` instead]. The simplest use case consists of asking for documents that are similar to a provided piece of text. Here, we are asking for all movies that have some text @@ -175,7 +175,7 @@ follows a similar syntax to the `per_field_analyzer` parameter of the Additionally, to provide documents not necessarily present in the index, <> are also supported. -`unlike`:: +`unlike`:: The `unlike` parameter is used in conjunction with `like` in order not to select terms found in a chosen set of documents. In other words, we could ask for documents `like: "Apple"`, but `unlike: "cake crumble tree"`. 
The syntax diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index a7a28cf88e8..f662dc825c0 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -57,7 +57,7 @@ GET /my_index/_search { "query": { "has_parent": { - "type": "blog_post", + "parent_type": "blog_post", "query": { "term": { "_id": "1" diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 0d079f6072c..2ccc84cabb9 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -19,7 +19,7 @@ PUT /my-index "doctype": { "properties": { "message": { - "type": "string" + "type": "keyword" } } }, diff --git a/docs/reference/query-dsl/prefix-query.asciidoc b/docs/reference/query-dsl/prefix-query.asciidoc index d2b75d10e5f..270fc925f0c 100644 --- a/docs/reference/query-dsl/prefix-query.asciidoc +++ b/docs/reference/query-dsl/prefix-query.asciidoc @@ -28,7 +28,7 @@ GET /_search -------------------------------------------------- // CONSOLE -Or : +Or with the `prefix` deprecated[5.0.0, Use `value`] syntax: [source,js] -------------------------------------------------- @@ -39,6 +39,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [prefix] used, expected [value] instead] This multi term query allows you to control how it gets rewritten using the <> diff --git a/docs/reference/query-dsl/template-query.asciidoc b/docs/reference/query-dsl/template-query.asciidoc index f66dbe56d3e..b4b00e5babd 100644 --- a/docs/reference/query-dsl/template-query.asciidoc +++ b/docs/reference/query-dsl/template-query.asciidoc @@ -1,6 +1,8 @@ [[query-dsl-template-query]] === Template Query +deprecated[5.0.0, Use the <> API] + A query that accepts a query template and a map of key/value pairs to fill in template 
parameters. Templating is based on Mustache. For simple token substitution all you provide is a query containing some variable that you want to substitute and the actual @@ -21,6 +23,7 @@ GET /_search } ------------------------------------------ // CONSOLE +// TEST[warning:[template] query is deprecated, use search template api instead] The above request is translated into: @@ -54,6 +57,7 @@ GET /_search } ------------------------------------------ // CONSOLE +// TEST[warning:[template] query is deprecated, use search template api instead] <1> New line characters (`\n`) should be escaped as `\\n` or removed, and quotes (`"`) should be escaped as `\\"`. @@ -80,6 +84,7 @@ GET /_search } ------------------------------------------ // CONSOLE +// TEST[warning:[template] query is deprecated, use search template api instead] <1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`. @@ -113,11 +118,10 @@ GET /_search ------------------------------------------ // CONSOLE // TEST[continued] +// TEST[warning:[template] query is deprecated, use search template api instead] <1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`. There is also a dedicated `template` endpoint, allows you to template an entire search request. Please see <> for more details. - - diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index efb7053c179..2e7bc9f1805 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -52,9 +52,9 @@ It tries hard to reflect the query matching logic in terms of understanding word [WARNING] If you want to highlight a lot of fields in a lot of documents with complex queries this highlighter will not be fast. 
-In its efforts to accurately reflect query logic it creates a tiny in-memory index and re-runs the original query criteria through -Lucene's query execution planner to get access to low-level match information on the current document. -This is repeated for every field and every document that needs highlighting. If this presents a performance issue in your system consider using an alternative highlighter. +In its efforts to accurately reflect query logic it creates a tiny in-memory index and re-runs the original query criteria through +Lucene's query execution planner to get access to low-level match information on the current document. +This is repeated for every field and every document that needs highlighting. If this presents a performance issue in your system consider using an alternative highlighter. [[postings-highlighter]] ==== Postings highlighter @@ -387,7 +387,7 @@ GET /_search "match_phrase": { "content": { "query": "foo bar", - "phrase_slop": 1 + "slop": 1 } } }, @@ -413,7 +413,7 @@ GET /_search "match_phrase": { "content": { "query": "foo bar", - "phrase_slop": 1, + "slop": 1, "boost": 10.0 } } diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc index 08625751eec..03c02538a0e 100644 --- a/docs/reference/search/request/source-filtering.asciidoc +++ b/docs/reference/search/request/source-filtering.asciidoc @@ -53,15 +53,16 @@ GET /_search -------------------------------------------------- // CONSOLE -Finally, for complete control, you can specify both include and exclude patterns: +Finally, for complete control, you can specify both `includes` and `excludes` +patterns: [source,js] -------------------------------------------------- GET /_search { "_source": { - "include": [ "obj1.*", "obj2.*" ], - "exclude": [ "*.description" ] + "includes": [ "obj1.*", "obj2.*" ], + "excludes": [ "*.description" ] }, "query" : { "term" : { "user" : "kimchy" } diff --git 
a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml index cf3e6883e45..2360dfc37f0 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml @@ -1,5 +1,7 @@ --- "Template query": + - skip: + features: warnings - do: index: @@ -23,54 +25,72 @@ - match: { acknowledged: true } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": { "term": { "text": { "value": "{{template}}" } } }, "params": { "template": "value1" } } } } - match: { hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "file": "file_query_template", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "id": "1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "id": "/mustache/1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } - do: - search: + warnings: + - '[template] query is deprecated, use search template api instead' + search: body: { "query": { "template": { "inline": {"match_{{template}}": {}}, "params" : { "template" : "all" } } } } - match: { hits.total: 2 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{ \"term\": { \"text\": { \"value\": \"{{template}}\" } } }", "params": { "template": "value1" } } } } - match: { 
hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{\"match_{{template}}\": {}}", "params" : { "template" : "all" } } } } - match: { hits.total: 2 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{\"match_all\": {}}", "params" : {} } } } - match: { hits.total: 2 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{\"query_string\": { \"query\" : \"{{query}}\" }}", "params" : { "query" : "text:\"value2 value3\"" } } } } - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index 4e88cef4c9f..16b607f4a22 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -173,6 +173,25 @@ The argument to `catch` can be any of: If `catch` is specified, then the `response` var must be cleared, and the test should fail if no error is thrown. +If the arguments to `do` include `warnings` then we are expecting a `Warning` +header to come back from the request. If the arguments *don't* include a +`warnings` argument then we *don't* expect the response to include a `Warning` +header. The warnings must match exactly. Using it looks like this: + +.... + - do: + warnings: + - '[index] is deprecated' + - quotes are not required because yaml + - but this argument is always a list, never a single string + - no matter how many warnings you expect + get: + index: test + type: test + id: 1 +.... 
+ + === `set` For some tests, it is necessary to extract a value from the previous `response`, in @@ -284,4 +303,3 @@ This depends on the datatype of the value being examined, eg: - length: { _tokens: 3 } # the `_tokens` array has 3 elements - length: { _source: 5 } # the `_source` hash has 5 keys .... - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml index 85d22a4dc23..415a38f00c1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml @@ -41,7 +41,7 @@ body: query: more_like_this: - docs: + like: - _index: test_1 _type: test @@ -51,8 +51,8 @@ _index: test_1 _type: test _id: 2 - ids: - - 3 + - + _id: 3 include: true min_doc_freq: 0 min_term_freq: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml index 424153aa573..48857522cb8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml @@ -72,7 +72,7 @@ setup: search: body: _source: - include: [ include.field1, include.field2 ] + includes: [ include.field1, include.field2 ] query: { match_all: {} } - match: { hits.hits.0._source.include.field1: v1 } - match: { hits.hits.0._source.include.field2: v2 } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java index 8f449274ea5..481ae752d05 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java @@ -18,6 +18,7 @@ */ package 
org.elasticsearch.test.rest.yaml; +import org.apache.http.Header; import org.apache.http.client.methods.HttpHead; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Response; @@ -25,6 +26,8 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; /** * Response obtained from a REST call, eagerly reads the response body into a string for later optional parsing. @@ -70,6 +73,19 @@ public class ClientYamlTestResponse { return response.getStatusLine().getReasonPhrase(); } + /** + * Get a list of all of the values of all warning headers returned in the response. + */ + public List getWarningHeaders() { + List warningHeaders = new ArrayList<>(); + for (Header header : response.getHeaders()) { + if (header.getName().equals("Warning")) { + warningHeaders.add(header.getValue()); + } + } + return warningHeaders; + } + /** * Returns the body properly parsed depending on the content type. * Might be a string or a json object parsed as a map. 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index df797dd53dd..6e47220ec08 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -41,6 +41,7 @@ public final class Features { "groovy_scripting", "headers", "stash_in_path", + "warnings", "yaml")); private Features() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java index 73fd57c3deb..e466c1010f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.test.rest.yaml.parser; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -36,7 +38,7 @@ import java.util.Map; * Context shared across the whole tests parse phase. * Provides shared parse methods and holds information needed to parse the test sections (e.g. 
es version) */ -public class ClientYamlTestSuiteParseContext { +public class ClientYamlTestSuiteParseContext implements ParseFieldMatcherSupplier { private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser(); private static final TeardownSectionParser TEARDOWN_SECTION_PARSER = new TeardownSectionParser(); @@ -185,4 +187,9 @@ public class ClientYamlTestSuiteParseContext { Map.Entry entry = map.entrySet().iterator().next(); return Tuple.tuple(entry.getKey(), entry.getValue()); } + + @Override + public ParseFieldMatcher getParseFieldMatcher() { + return ParseFieldMatcher.STRICT; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java index eda0f728f93..b89aa821ec9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.parser; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -25,9 +26,13 @@ import org.elasticsearch.test.rest.yaml.section.ApiCallSection; import org.elasticsearch.test.rest.yaml.section.DoSection; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static java.util.Collections.unmodifiableList; + /** * Parser for do sections */ @@ -44,6 +49,7 @@ public class DoSectionParser implements ClientYamlTestFragmentParser DoSection doSection = new DoSection(parseContext.parser().getTokenLocation()); ApiCallSection apiCallSection = null; Map headers = new HashMap<>(); + List expectedWarnings = new ArrayList<>(); while ((token = 
parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -52,6 +58,17 @@ public class DoSectionParser implements ClientYamlTestFragmentParser if ("catch".equals(currentFieldName)) { doSection.setCatch(parser.text()); } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("warnings".equals(currentFieldName)) { + while ((token = parser.nextToken()) == XContentParser.Token.VALUE_STRING) { + expectedWarnings.add(parser.text()); + } + if (token != XContentParser.Token.END_ARRAY) { + throw new ParsingException(parser.getTokenLocation(), "[warnings] must be a string array but saw [" + token + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "unknown array [" + currentFieldName + "]"); + } } else if (token == XContentParser.Token.START_OBJECT) { if ("headers".equals(currentFieldName)) { String headerName = null; @@ -97,6 +114,7 @@ public class DoSectionParser implements ClientYamlTestFragmentParser apiCallSection.addHeaders(headers); } doSection.setApiCallSection(apiCallSection); + doSection.setExpectedWarningHeaders(unmodifiableList(expectedWarnings)); } finally { parser.nextToken(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 4bad7f57a3a..af4a8e4f51a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -29,8 +29,12 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestResponseException; import java.io.IOException; import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; +import java.util.Set; +import static java.util.Collections.emptyList; import static org.elasticsearch.common.collect.Tuple.tuple; import static 
org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.allOf; @@ -49,6 +53,10 @@ import static org.junit.Assert.fail; * headers: * Authorization: Basic user:pass * Content-Type: application/json + * warnings: + * - Stuff is deprecated, yo + * - Don't use deprecated stuff + * - Please, stop. It hurts. * update: * index: test_1 * type: test @@ -63,6 +71,7 @@ public class DoSection implements ExecutableSection { private final XContentLocation location; private String catchParam; private ApiCallSection apiCallSection; + private List expectedWarningHeaders = emptyList(); public DoSection(XContentLocation location) { this.location = location; @@ -84,6 +93,22 @@ public class DoSection implements ExecutableSection { this.apiCallSection = apiCallSection; } + /** + * Warning headers that we expect from this response. If the headers don't match exactly this request is considered to have failed. + * Defaults to emptyList. + */ + public List getExpectedWarningHeaders() { + return expectedWarningHeaders; + } + + /** + * Set the warning headers that we expect from this response. If the headers don't match exactly this request is considered to have + * failed. Defaults to emptyList. 
+ */ + public void setExpectedWarningHeaders(List expectedWarningHeaders) { + this.expectedWarningHeaders = expectedWarningHeaders; + } + @Override public XContentLocation getLocation() { return location; @@ -100,7 +125,7 @@ public class DoSection implements ExecutableSection { } try { - ClientYamlTestResponse restTestResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), + ClientYamlTestResponse response = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies(), apiCallSection.getHeaders()); if (Strings.hasLength(catchParam)) { String catchStatusCode; @@ -111,8 +136,9 @@ public class DoSection implements ExecutableSection { } else { throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported"); } - fail(formatStatusCodeMessage(restTestResponse, catchStatusCode)); + fail(formatStatusCodeMessage(response, catchStatusCode)); } + checkWarningHeaders(response.getWarningHeaders()); } catch(ClientYamlTestResponseException e) { ClientYamlTestResponse restTestResponse = e.getRestTestResponse(); if (!Strings.hasLength(catchParam)) { @@ -135,6 +161,39 @@ public class DoSection implements ExecutableSection { } } + /** + * Check that the response contains only the warning headers that we expect. + */ + void checkWarningHeaders(List warningHeaders) { + StringBuilder failureMessage = null; + // LinkedHashSet so that missing expected warnings come back in a predictable order which is nice for testing + Set expected = new LinkedHashSet<>(expectedWarningHeaders); + for (String header : warningHeaders) { + if (expected.remove(header)) { + // Was expected, all good. 
+ continue; + } + if (failureMessage == null) { + failureMessage = new StringBuilder("got unexpected warning headers ["); + } + failureMessage.append('\n').append(header); + } + if (false == expected.isEmpty()) { + if (failureMessage == null) { + failureMessage = new StringBuilder(); + } else { + failureMessage.append("\n] "); + } + failureMessage.append("didn't get expected warning headers ["); + for (String header : expected) { + failureMessage.append('\n').append(header); + } + } + if (failureMessage != null) { + fail(failureMessage + "\n]"); + } + } + private void assertStatusCode(ClientYamlTestResponse restTestResponse) { Tuple> stringMatcherTuple = catches.get(catchParam); assertThat(formatStatusCodeMessage(restTestResponse, stringMatcherTuple.v1()), diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java index 5d79432155f..0cecc508bf5 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java @@ -29,8 +29,10 @@ import org.elasticsearch.test.rest.yaml.section.DoSection; import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.Arrays; import java.util.Map; +import static java.util.Collections.singletonList; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -344,11 +346,11 @@ public class DoSectionParserTests extends AbstractParserTestCase { public void testParseDoSectionWithHeaders() throws Exception { parser = YamlXContent.yamlXContent.createParser( "headers:\n" + - " Authorization: \"thing one\"\n" + - " Content-Type: \"application/json\"\n" + - "indices.get_warmer:\n" + - " index: test_index\n" + - " name: test_warmer" + " Authorization: \"thing 
one\"\n" + + " Content-Type: \"application/json\"\n" + + "indices.get_warmer:\n" + + " index: test_index\n" + + " name: test_warmer" ); DoSectionParser doSectionParser = new DoSectionParser(); @@ -381,9 +383,9 @@ public class DoSectionParserTests extends AbstractParserTestCase { public void testParseDoSectionMultivaluedField() throws Exception { parser = YamlXContent.yamlXContent.createParser( "indices.get_field_mapping:\n" + - " index: test_index\n" + - " type: test_type\n" + - " field: [ text , text1 ]" + " index: test_index\n" + + " type: test_type\n" + + " field: [ text , text1 ]" ); DoSectionParser doSectionParser = new DoSectionParser(); @@ -400,6 +402,46 @@ public class DoSectionParserTests extends AbstractParserTestCase { assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); } + public void testParseDoSectionExpectedWarnings() throws Exception { + parser = YamlXContent.yamlXContent.createParser( + "indices.get_field_mapping:\n" + + " index: test_index\n" + + " type: test_type\n" + + "warnings:\n" + + " - some test warning they are typically pretty long\n" + + " - some other test warning somtimes they have [in] them" + ); + + DoSectionParser doSectionParser = new DoSectionParser(); + DoSection doSection = doSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); + + assertThat(doSection.getCatch(), nullValue()); + assertThat(doSection.getApiCallSection(), notNullValue()); + assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); + assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); + assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); + assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); + assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); + assertThat(doSection.getExpectedWarningHeaders(), 
equalTo(Arrays.asList( + "some test warning they are typically pretty long", + "some other test warning somtimes they have [in] them"))); + + parser = YamlXContent.yamlXContent.createParser( + "indices.get_field_mapping:\n" + + " index: test_index\n" + + "warnings:\n" + + " - just one entry this time" + ); + + doSection = doSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); + assertThat(doSection.getCatch(), nullValue()); + assertThat(doSection.getApiCallSection(), notNullValue()); + assertThat(doSection.getExpectedWarningHeaders(), equalTo(singletonList( + "just one entry this time"))); + + } + private static void assertJsonEquals(Map actual, String expected) throws IOException { Map expectedMap; try (XContentParser parser = JsonXContent.jsonXContent.createParser(expected)) { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java new file mode 100644 index 00000000000..e981b2d999c --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +public class DoSectionTests extends ESTestCase { + public void testWarningHeaders() throws IOException { + DoSection section = new DoSection(new XContentLocation(1, 1)); + + // No warning headers doesn't throw an exception + section.checkWarningHeaders(emptyList()); + + // Any warning headers fail + AssertionError e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(singletonList("test"))); + assertEquals("got unexpected warning headers [\ntest\n]", e.getMessage()); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(Arrays.asList("test", "another", "some more"))); + assertEquals("got unexpected warning headers [\ntest\nanother\nsome more\n]", e.getMessage()); + + // But not when we expect them + section.setExpectedWarningHeaders(singletonList("test")); + section.checkWarningHeaders(singletonList("test")); + section.setExpectedWarningHeaders(Arrays.asList("test", "another", "some more")); + section.checkWarningHeaders(Arrays.asList("test", "another", "some more")); + + // But if you don't get some that you did expect, that is an error + section.setExpectedWarningHeaders(singletonList("test")); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(emptyList())); + assertEquals("didn't get expected warning headers [\ntest\n]", e.getMessage()); + section.setExpectedWarningHeaders(Arrays.asList("test", "another", "some more")); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(emptyList())); + assertEquals("didn't get expected warning headers [\ntest\nanother\nsome more\n]", e.getMessage()); + e = expectThrows(AssertionError.class, () -> 
section.checkWarningHeaders(Arrays.asList("test", "some more"))); + assertEquals("didn't get expected warning headers [\nanother\n]", e.getMessage()); + + // It is also an error if you get some warning you want and some you don't want + section.setExpectedWarningHeaders(Arrays.asList("test", "another", "some more")); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(Arrays.asList("test", "cat"))); + assertEquals("got unexpected warning headers [\ncat\n] didn't get expected warning headers [\nanother\nsome more\n]", + e.getMessage()); + } +} From e08f11dabc3ca04fbcded026aee512696dfe835d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 4 Aug 2016 22:47:39 +0200 Subject: [PATCH 045/103] Remove BWC serialization logic for pre 2.2 nodes (#19810) This change removes all pre 2.2 logic from InternalSearchResponse serialization. It's unneeded in 5.0 since we require full cluster restart --- .../internal/InternalSearchResponse.java | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 26410cc9680..09a787ac3cb 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -136,14 +136,8 @@ public class InternalSearchResponse implements Streamable, ToXContent { suggest = Suggest.readSuggest(in); } timedOut = in.readBoolean(); - terminatedEarly = in.readOptionalBoolean(); - - if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { - profileResults = new SearchProfileShardResults(in); - } else { - profileResults = null; - } + profileResults = in.readOptionalWriteable(SearchProfileShardResults::new); } @Override @@ -162,16 +156,7 @@ public class InternalSearchResponse implements Streamable, ToXContent { 
suggest.writeTo(out); } out.writeBoolean(timedOut); - out.writeOptionalBoolean(terminatedEarly); - - if (out.getVersion().onOrAfter(Version.V_2_2_0)) { - if (profileResults == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - profileResults.writeTo(out); - } - } + out.writeOptionalWriteable(profileResults); } } From 609a199bd49efb6542fd167f0591c9d6a2847727 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Fri, 5 Aug 2016 08:58:03 +0200 Subject: [PATCH 046/103] Upon being elected as master, prefer joins' node info to existing cluster state (#19743) When we introduced [persistent node ids](https://github.com/elastic/elasticsearch/pull/19140) we were concerned that people may copy data folders from one to another resulting in two nodes competing for the same id in the cluster. To solve this we elected to not allow an incoming join if a different node with the same id already exists in the cluster, or if some other node already has the same transport address as the incoming join. The rationale there was that it is better to prefer existing nodes and that we can rely on node fault detection to remove any node from the cluster that isn't correct any more, making room for the node that wants to join (and will keep trying). Sadly there were two problems with this: 1) One minor and easy to fix - we didn't allow for the case where the existing node can have the same network address as the incoming one, but have a different ephemeral id (after node restart). This confused the logic in `AllocationService`, in these rare cases. The cluster is good enough to detect this and recover later on, but it's not clean. 2) The assumption that Node Fault Detection will clean up is *wrong* when the node just won an election (it wasn't master before) and needs to process the incoming joins in order to commit the cluster state and assume its mastership. In those cases, the Node Fault Detection isn't active.
This PR fixes these two and prefers incoming nodes to existing nodes when finishing an election. On top of that, on request by @ywelsch , `AllocationService` synchronization between the nodes of the cluster and its routing table is now explicit rather than something we do all the time. The same goes for promotion of replicas to primaries. --- .../allocation/AllocationBenchmark.java | 2 +- .../elasticsearch/cluster/ClusterState.java | 5 +- .../cluster/node/DiscoveryNodes.java | 32 ++-- .../routing/allocation/AllocationService.java | 36 +++- .../cluster/service/ClusterService.java | 2 +- .../discovery/local/LocalDiscovery.java | 8 +- .../discovery/zen/NodeJoinController.java | 38 +++- .../discovery/zen/ZenDiscovery.java | 3 +- .../org/elasticsearch/tribe/TribeService.java | 2 +- .../node/tasks/TransportTasksActionTests.java | 2 +- .../cluster/reroute/ClusterRerouteTests.java | 2 +- .../shrink/TransportShrinkActionTests.java | 4 +- .../ingest/IngestProxyActionFilterTests.java | 2 +- .../TransportMultiSearchActionTests.java | 10 +- .../TransportBroadcastByNodeActionTests.java | 2 +- .../nodes/TransportNodesActionTests.java | 2 +- .../ClusterStateCreationUtils.java | 6 +- .../TransportClientHeadersTests.java | 2 +- .../cluster/ClusterChangedEventTests.java | 2 +- .../cluster/ClusterStateDiffIT.java | 6 +- .../cluster/ClusterStateTests.java | 2 +- .../cluster/NodeConnectionsServiceTests.java | 2 +- ...rdFailedClusterStateTaskExecutorTests.java | 2 +- .../MetaDataCreateIndexServiceTests.java | 4 +- .../cluster/node/DiscoveryNodesTests.java | 6 +- .../DelayedAllocationServiceTests.java | 32 ++-- .../cluster/routing/PrimaryTermsTests.java | 4 +- .../cluster/routing/RoutingTableTests.java | 2 +- .../cluster/routing/UnassignedInfoTests.java | 24 +-- .../allocation/ActiveAllocationIdTests.java | 8 +- .../allocation/AddIncrementallyTests.java | 10 +- .../allocation/AllocationCommandsTests.java | 19 +- .../allocation/AllocationPriorityTests.java | 2 +-
.../allocation/AwarenessAllocationTests.java | 98 +++++----- .../allocation/BalanceConfigurationTests.java | 14 +- .../allocation/CatAllocationTestCase.java | 2 +- .../ClusterRebalanceRoutingTests.java | 32 ++-- .../ConcurrentRebalanceRoutingTests.java | 4 +- .../allocation/DeadNodesAllocationTests.java | 32 ++-- .../DecisionsImpactOnClusterHealthTests.java | 2 +- ...ReplicaAsPrimaryDuringRelocationTests.java | 6 +- .../ExpectedShardSizeAllocationTests.java | 6 +- .../allocation/FailedNodeRoutingTests.java | 4 +- .../allocation/FailedShardsRoutingTests.java | 22 +-- .../FilterAllocationDeciderTests.java | 2 +- .../allocation/FilterRoutingTests.java | 18 +- .../routing/allocation/IndexBalanceTests.java | 10 +- .../MaxRetryAllocationDeciderTests.java | 4 +- .../NodeVersionAllocationDeciderTests.java | 38 ++-- ...alPrimariesToRelocatingPrimariesTests.java | 8 +- .../PreferPrimaryAllocationTests.java | 2 +- .../PrimaryElectionRoutingTests.java | 14 +- ...yNotRelocatedWhileBeingRecoveredTests.java | 6 +- .../RandomAllocationDeciderTests.java | 12 +- .../allocation/RebalanceAfterActiveTests.java | 4 +- .../ReplicaAllocatedAfterPrimaryTests.java | 2 +- .../RoutingNodesIntegrityTests.java | 12 +- .../allocation/SameShardRoutingTests.java | 6 +- .../allocation/ShardVersioningTests.java | 2 +- .../ShardsLimitAllocationTests.java | 8 +- .../SingleShardNoReplicasRoutingTests.java | 18 +- .../SingleShardOneReplicaRoutingTests.java | 8 +- .../allocation/StartedShardsRoutingTests.java | 2 +- .../TenShardsOneReplicaRoutingTests.java | 6 +- .../allocation/ThrottlingAllocationTests.java | 18 +- .../UpdateNumberOfReplicasTests.java | 4 +- .../decider/DiskThresholdDeciderTests.java | 38 ++-- .../DiskThresholdDeciderUnitTests.java | 10 +- .../decider/EnableAllocationTests.java | 32 ++-- .../ClusterSerializationTests.java | 4 +- .../ClusterStateToStringTests.java | 2 +- .../structure/RoutingIteratorTests.java | 19 +- .../discovery/ZenFaultDetectionTests.java | 8 +- 
.../zen/NodeJoinControllerTests.java | 177 +++++++++++++----- ...eRemovalClusterStateTaskExecutorTests.java | 14 +- .../discovery/zen/ZenDiscoveryIT.java | 2 +- .../discovery/zen/ZenDiscoveryUnitTests.java | 6 +- .../zen/ping/unicast/UnicastZenPingIT.java | 8 +- .../PendingClusterStatesQueueTests.java | 2 +- .../PublishClusterStateActionTests.java | 38 ++-- .../gateway/AsyncShardFetchTests.java | 14 +- .../gateway/GatewayMetaStateTests.java | 4 +- .../gateway/PrimaryShardAllocatorTests.java | 10 +- .../gateway/ReplicaShardAllocatorTests.java | 4 +- .../indices/cluster/ClusterStateChanges.java | 9 +- ...ClusterStateServiceRandomUpdatesTests.java | 4 +- .../indices/state/RareClusterStateIT.java | 5 +- .../indices/store/IndicesStoreTests.java | 12 +- 88 files changed, 646 insertions(+), 487 deletions(-) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index 5e5f35f6040..86902b380c8 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -145,7 +145,7 @@ public class AllocationBenchmark { RoutingTable routingTable = rb.build(); DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); for (int i = 1; i <= numNodes; i++) { - nb.put(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags)))); + nb.add(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags)))); } initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).nodes diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index 6745900057d..632179153f9 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -625,6 +625,10 @@ public class ClusterState implements ToXContent, Diffable { return this; } + public DiscoveryNodes nodes() { + return nodes; + } + public Builder routingResult(RoutingAllocation.Result routingResult) { this.routingTable = routingResult.routingTable(); this.metaData = routingResult.metaData(); @@ -723,7 +727,6 @@ public class ClusterState implements ToXContent, Diffable { public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { return PROTO.readFrom(in, localNode); } - } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 68dedc433da..9d0edf7b910 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -357,14 +357,14 @@ public class DiscoveryNodes extends AbstractDiffable implements Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId); for (DiscoveryNode node : this) { if (newNodes.contains(node.getId())) { - builder.put(node); + builder.add(node); } } return builder.build(); } public DiscoveryNodes newNode(DiscoveryNode node) { - return new Builder(this).put(node).build(); + return new Builder(this).add(node).build(); } /** @@ -554,8 +554,8 @@ public class DiscoveryNodes extends AbstractDiffable implements node = localNode; } // some one already built this and validated it's OK, skip the n2 scans - assert builder.validatePut(node) == null : "building disco nodes from network doesn't pass preflight: " - + builder.validatePut(node); + assert builder.validateAdd(node) == null : "building disco nodes from network doesn't pass preflight: " + + builder.validateAdd(node); builder.putUnsafe(node); } return 
builder.build(); @@ -592,10 +592,10 @@ public class DiscoveryNodes extends AbstractDiffable implements /** * adds a disco node to the builder. Will throw an {@link IllegalArgumentException} if - * the supplied node doesn't pass the pre-flight checks performed by {@link #validatePut(DiscoveryNode)} + * the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(DiscoveryNode)} */ - public Builder put(DiscoveryNode node) { - final String preflight = validatePut(node); + public Builder add(DiscoveryNode node) { + final String preflight = validateAdd(node); if (preflight != null) { throw new IllegalArgumentException(preflight); } @@ -603,6 +603,16 @@ public class DiscoveryNodes extends AbstractDiffable implements return this; } + /** + * Get a node by its id + * + * @param nodeId id of the wanted node + * @return wanted node if it exists. Otherwise null + */ + @Nullable public DiscoveryNode get(String nodeId) { + return nodes.get(nodeId); + } + private void putUnsafe(DiscoveryNode node) { nodes.put(node.getId(), node); } @@ -635,10 +645,10 @@ public class DiscoveryNodes extends AbstractDiffable implements * * @return null if all is OK or an error message explaining why a node can not be added. 
* - * Note: if this method returns a non-null value, calling {@link #put(DiscoveryNode)} will fail with an + * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an * exception */ - private String validatePut(DiscoveryNode node) { + private String validateAdd(DiscoveryNode node) { for (ObjectCursor cursor : nodes.values()) { final DiscoveryNode existingNode = cursor.value; if (node.getAddress().equals(existingNode.getAddress()) && @@ -646,9 +656,9 @@ public class DiscoveryNodes extends AbstractDiffable implements return "can't add node " + node + ", found existing node " + existingNode + " with same address"; } if (node.getId().equals(existingNode.getId()) && - node.getAddress().equals(existingNode.getAddress()) == false) { + node.equals(existingNode) == false) { return "can't add node " + node + ", found existing node " + existingNode - + " with the same id, but a different address"; + + " with the same id but is a different node instance"; } } return null; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index f58bc22c63f..8745007e44f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -249,11 +249,36 @@ public class AllocationService extends AbstractComponent { applyFailedShard(allocation, failedShard, unassignedInfo); } gatewayAllocator.applyFailedShards(allocation); + reroute(allocation); String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString()); return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ..."); } + /** + * unassigned an shards that are associated with nodes that are no longer part of the cluster, potentially 
promoting replicas + * if needed. + */ + public RoutingAllocation.Result deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { + RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); + // shuffle the unassigned nodes, just so we won't have things like poison failed shards + routingNodes.unassigned().shuffle(); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, + clusterInfoService.getClusterInfo(), currentNanoTime(), false); + + // first, clear from the shards any node id they used to belong to that is now dead + boolean changed = deassociateDeadNodes(allocation); + + if (reroute) { + changed |= reroute(allocation); + } + + if (!changed) { + return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); + } + return buildResultAndLogHealthChange(allocation, reason); + } + /** * Removes delay markers from unassigned shards based on current time stamp. Returns true if markers were removed. */ @@ -352,13 +377,9 @@ public class AllocationService extends AbstractComponent { } private boolean reroute(RoutingAllocation allocation) { - boolean changed = false; - // first, clear from the shards any node id they used to belong to that is now dead - changed |= deassociateDeadNodes(allocation); + assert deassociateDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. 
See deassociateDeadNodes"; - // elect primaries *before* allocating unassigned, so backups of primaries that failed - // will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*) - changed |= electPrimariesAndUnassignedDanglingReplicas(allocation); + boolean changed = electPrimariesAndUnassignedDanglingReplicas(allocation); // now allocate all the unassigned to available nodes if (allocation.routingNodes().unassigned().size() > 0) { @@ -390,8 +411,8 @@ public class AllocationService extends AbstractComponent { if (candidate != null) { shardEntry = unassignedIterator.demotePrimaryToReplicaShard(); ShardRouting primarySwappedCandidate = routingNodes.promoteAssignedReplicaShardToPrimary(candidate); + changed = true; if (primarySwappedCandidate.relocatingNodeId() != null) { - changed = true; // its also relocating, make sure to move the other routing to primary RoutingNode node = routingNodes.node(primarySwappedCandidate.relocatingNodeId()); if (node != null) { @@ -406,7 +427,6 @@ public class AllocationService extends AbstractComponent { IndexMetaData index = allocation.metaData().getIndexSafe(primarySwappedCandidate.index()); if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) { routingNodes.reinitShadowPrimary(primarySwappedCandidate); - changed = true; } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 2dce2c82d90..21e2defd9b0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -156,7 +156,7 @@ public class ClusterService extends AbstractLifecycleComponent { public synchronized void setLocalNode(DiscoveryNode localNode) { assert clusterState.nodes().getLocalNodeId() == null : "local node is already set"; - DiscoveryNodes.Builder nodeBuilder = 
DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId()); + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()); this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java index 790d40b8f59..c544db4047f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java @@ -134,7 +134,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.put(discovery.localNode()); + nodesBuilder.add(discovery.localNode()); } nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); // remove the NO_MASTER block in this case @@ -160,7 +160,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.put(discovery.localNode()); + nodesBuilder.add(discovery.localNode()); } nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build(); @@ -231,8 +231,8 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov } // reroute here, so we eagerly remove dead nodes from the routing ClusterState updatedState = 
ClusterState.builder(currentState).nodes(newNodes).build(); - RoutingAllocation.Result routingResult = master.allocationService.reroute( - ClusterState.builder(updatedState).build(), "elected as master"); + RoutingAllocation.Result routingResult = master.allocationService.deassociateDeadNodes( + ClusterState.builder(updatedState).build(), true, "node stopped"); return ClusterState.builder(updatedState).routingResult(routingResult).build(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index ff5cdd4e31e..1f4f57c4ed4 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -413,8 +413,7 @@ public class NodeJoinController extends AbstractComponent { final DiscoveryNodes currentNodes = currentState.nodes(); boolean nodesChanged = false; - ClusterState.Builder newState = ClusterState.builder(currentState); - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); + ClusterState.Builder newState; if (joiningNodes.size() == 1 && joiningNodes.get(0).equals(FINISH_ELECTION_TASK)) { return results.successes(joiningNodes).build(currentState); @@ -423,16 +422,17 @@ public class NodeJoinController extends AbstractComponent { // use these joins to try and become the master. 
// Note that we don't have to do any validation of the amount of joining nodes - the commit // during the cluster state publishing guarantees that we have enough - nodesBuilder.masterNodeId(currentNodes.getLocalNodeId()); - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) - .removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); - newState.blocks(clusterBlocks); + newState = becomeMasterAndTrimConflictingNodes(currentState, joiningNodes); nodesChanged = true; - } else if (nodesBuilder.isLocalNodeElectedMaster() == false) { + } else if (currentNodes.isLocalNodeElectedMaster() == false) { logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode()); throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); + } else { + newState = ClusterState.builder(currentState); } + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); + assert nodesBuilder.isLocalNodeElectedMaster(); // processing any joins @@ -443,7 +443,7 @@ public class NodeJoinController extends AbstractComponent { logger.debug("received a join request for an existing node [{}]", node); } else { try { - nodesBuilder.put(node); + nodesBuilder.add(node); nodesChanged = true; } catch (IllegalArgumentException e) { results.failure(node, e); @@ -468,6 +468,28 @@ public class NodeJoinController extends AbstractComponent { return results.build(newState.build()); } + private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { + assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint(); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); + nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); + ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) + 
.removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); + for (final DiscoveryNode joiningNode : joiningNodes) { + final DiscoveryNode existingNode = nodesBuilder.get(joiningNode.getId()); + if (existingNode != null && existingNode.equals(joiningNode) == false) { + logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", existingNode, joiningNode); + nodesBuilder.remove(existingNode.getId()); + } + } + + // now trim any left over dead nodes - either left there when the previous master stepped down + // or removed by us above + ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build(); + RoutingAllocation.Result result = allocationService.deassociateDeadNodes(tmpState, false, + "removed dead nodes on election"); + return ClusterState.builder(tmpState).routingResult(result); + } + @Override public boolean runOnlyOnMaster() { // we validate that we are allowed to change the cluster state during cluster state processing diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 3b176f7eff9..d380b1fd601 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -570,7 +570,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) { return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes")); } else { - final RoutingAllocation.Result routingResult = allocationService.reroute(remainingNodesClusterState, describeTasks(tasks)); + final RoutingAllocation.Result routingResult = + allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)); return 
resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build()); } } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index eb44a897386..40c805e0b00 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -382,7 +382,7 @@ public class TribeService extends AbstractLifecycleComponent { clusterStateChanged = true; logger.info("[{}] adding node [{}]", tribeName, discoNode); nodes.remove(tribe.getId()); // remove any existing node with the same id but different ephemeral id - nodes.put(discoNode); + nodes.add(discoNode); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 2c78786ab04..955957abfb6 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -739,7 +739,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // First group by node DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder(); for (TestNode testNode : this.testNodes) { - discoNodes.put(testNode.discoveryNode); + discoNodes.add(testNode.discoveryNode); } response.setDiscoveryNodes(discoNodes.build()); Map byNodes = serialize(response, new ToXContent.MapParams(Collections.singletonMap("group_by", "nodes"))); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index a88c32aa43d..b3b91e6bfd8 100644 --- 
a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -166,7 +166,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING .getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); RoutingTable prevRoutingTable = routingTable; routingTable = service.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java index 3fcade05839..49d0ce447ba 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java @@ -93,7 +93,7 @@ public class TransportShrinkActionTests extends ESTestCase { // create one that won't fail ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0, - Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), @@ -116,7 +116,7 @@ public class 
TransportShrinkActionTests extends ESTestCase { ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0, Settings.builder() .put("index.blocks.write", true) - .build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java index 3d1a1a1c69d..a602465197a 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java @@ -79,7 +79,7 @@ public class IngestProxyActionFilterTests extends ESTestCase { roles.add(DiscoveryNode.Role.INGEST); } DiscoveryNode node = new DiscoveryNode(nodeId, nodeId, LocalTransportAddress.buildUnique(), attributes, roles, VersionUtils.randomVersion(random())); - builder.put(node); + builder.add(node); if (i == totalNodes - 1) { localNode = node; } diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index d751424ef72..011fb172514 100644 --- a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -37,16 +37,12 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; import java.util.Collections; -import java.util.Iterator; -import java.util.List; 
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -117,12 +113,12 @@ public class TransportMultiSearchActionTests extends ESTestCase { int numDataNodes = randomIntBetween(1, 10); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (int i = 0; i < numDataNodes; i++) { - builder.put(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(), + builder.add(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)); } - builder.put(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(), + builder.add(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT)); - builder.put(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(), + builder.add(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.INGEST), Version.CURRENT)); ClusterState state = ClusterState.builder(new ClusterName("_name")).nodes(builder).build(); diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 603ad664ec3..7b237383034 100644 --- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -220,7 +220,7 @@ public 
class TransportBroadcastByNodeActionTests extends ESTestCase { int totalIndexShards = 0; for (int i = 0; i < numberOfNodes; i++) { final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); int numberOfShards = randomIntBetween(1, 10); totalIndexShards += numberOfShards; for (int j = 0; j < numberOfShards; j++) { diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index ae8ea4a0b95..744a116f4a7 100644 --- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -190,7 +190,7 @@ public class TransportNodesActionTests extends ESTestCase { attributes.put("custom", randomBoolean() ? "match" : randomAsciiOfLengthBetween(3, 5)); } final DiscoveryNode node = newNode(i, attributes, roles); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); discoveryNodes.add(node); } discoBuilder.localNodeId(randomFrom(discoveryNodes).getId()); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 7496bb85faf..813e4f630c4 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -81,7 +81,7 @@ public class ClusterStateCreationUtils { Set unassignedNodes = new HashSet<>(); for (int i = 0; i < numberOfNodes + 1; i++) { final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); unassignedNodes.add(node.getId()); } discoBuilder.localNodeId(newNode(0).getId()); @@ 
-153,7 +153,7 @@ public class ClusterStateCreationUtils { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes + 1; i++) { final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); } discoBuilder.localNodeId(newNode(0).getId()); discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures @@ -241,7 +241,7 @@ public class ClusterStateCreationUtils { public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); for (DiscoveryNode node : allNodes) { - discoBuilder.put(node); + discoBuilder.add(node); } if (masterNode != null) { discoBuilder.masterNodeId(masterNode.getId()); diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index 282f929ff24..e736e4b86a1 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -131,7 +131,7 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { ClusterName cluster1 = new ClusterName("cluster1"); ClusterState.Builder builder = ClusterState.builder(cluster1); //the sniffer detects only data nodes - builder.nodes(DiscoveryNodes.builder().put(new DiscoveryNode("node_id", address, Collections.emptyMap(), + builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); ((TransportResponseHandler) handler) .handleResponse(new ClusterStateResponse(cluster1, builder.build())); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java 
b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 555f23813cb..6326d96f317 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -310,7 +310,7 @@ public class ClusterChangedEventTests extends ESTestCase { } } final DiscoveryNode node = newNode(nodeId, roles); - builder.put(node); + builder.add(node); if (i == localNodeIndex) { builder.localNodeId(nodeId); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 9ffabec6fc0..1a7a2093a58 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -78,7 +78,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase { emptyMap(), emptySet(), Version.CURRENT); DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.getId()).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode); @@ -193,14 +193,14 @@ public class ClusterStateDiffIT extends ESIntegTestCase { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); if (randomBoolean()) { - nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), + nodes.add(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), emptySet(), randomVersion(random()))); } } } 
int additionalNodeCount = randomIntBetween(1, 20); for (int i = 0; i < additionalNodeCount; i++) { - nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), + nodes.add(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), emptySet(), randomVersion(random()))); } return ClusterState.builder(clusterState).nodes(nodes); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 6b99e525cb2..b0cba5bf1de 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -35,7 +35,7 @@ public class ClusterStateTests extends ESTestCase { final Version version = Version.CURRENT; final DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version); final DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version); - final DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build(); + final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); ClusterState noMaster2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index b0bc3ee2e4e..ed7a20dc87e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -73,7 +73,7 @@ public class 
NodeConnectionsServiceTests extends ESTestCase { private ClusterState clusterStateFromNodes(List nodes) { final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (DiscoveryNode node : nodes) { - builder.put(node); + builder.add(node); } return ClusterState.builder(new ClusterName("test")).nodes(builder).build(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 31197e0a9a4..1f98f2cdc93 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -154,7 +154,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa private ClusterState createClusterStateWithStartedShards(String reason) { int numberOfNodes = 1 + numberOfReplicas; DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); - IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::put); + IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::add); ClusterState stateAfterAddingNode = ClusterState.builder(clusterState).nodes(nodes).build(); RoutingTable afterReroute = diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index f6f7aaf3228..f1f20511fcc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -129,7 +129,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { // create one that won't fail ClusterState clusterState = 
ClusterState.builder(createClusterState("source", numShards, 0, - Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), @@ -157,7 +157,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { .put("index.blocks.write", true) .put("index.similarity.default.type", "BM25") .put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword") - .build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index ec741a908c5..d6a83108d0f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -123,13 +123,13 @@ public class DiscoveryNodesTests extends ESTestCase { DiscoveryNode masterB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB); DiscoveryNodes.Builder builderA = DiscoveryNodes.builder(); - nodesA.stream().forEach(builderA::put); + nodesA.stream().forEach(builderA::add); final String masterAId = masterA == null ? 
null : masterA.getId(); builderA.masterNodeId(masterAId); builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId()); DiscoveryNodes.Builder builderB = DiscoveryNodes.builder(); - nodesB.stream().forEach(builderB::put); + nodesB.stream().forEach(builderB::add); final String masterBId = masterB == null ? null : masterB.getId(); builderB.masterNodeId(masterBId); builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId()); @@ -186,7 +186,7 @@ public class DiscoveryNodesTests extends ESTestCase { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); List nodesList = randomNodes(numNodes); for (DiscoveryNode node : nodesList) { - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); } discoBuilder.localNodeId(randomFrom(nodesList).getId()); discoBuilder.masterNodeId(randomFrom(nodesList).getId()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java index 14f4ccf5581..8ce039e9260 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -30,12 +29,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; -import 
org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -45,8 +41,6 @@ import org.junit.Before; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.singleton; @@ -96,7 +90,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).localNodeId("node1").masterNodeId("node1")) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1")) .build(); clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); // starting primaries @@ -113,10 +107,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()).remove("node2"); boolean nodeAvailableForAllocation = randomBoolean(); if (nodeAvailableForAllocation) { - nodes.put(newNode("node3")); + nodes.add(newNode("node3")); } clusterState = ClusterState.builder(clusterState).nodes(nodes).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); + clusterState = 
ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build(); ClusterState newState = clusterState; List unassignedShards = newState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED); if (nodeAvailableForAllocation) { @@ -142,7 +137,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).localNodeId("node1").masterNodeId("node1")) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1")) .build(); final long baseTimestampNanos = System.nanoTime(); allocationService.setNanoTimeOverride(baseTimestampNanos); @@ -169,7 +164,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { // remove node that has replica and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(nodeId)).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build(); ClusterState stateWithDelayedShard = clusterState; // make sure the replica is marked as delayed (i.e. 
not reallocated) assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard)); @@ -239,8 +235,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("short_delay")).addAsNew(metaData.index("long_delay")).build()) .nodes(DiscoveryNodes.builder() - .put(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0") - .put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + .add(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0") + .add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); // allocate shards clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); // start primaries @@ -284,7 +280,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .build(); // make sure both replicas are marked as delayed (i.e. 
not reallocated) allocationService.setNanoTimeOverride(baseTimestampNanos); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build(); final ClusterState stateWithDelayedShards = clusterState; assertEquals(2, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShards)); RoutingNodes.UnassignedShards.UnassignedIterator iter = stateWithDelayedShards.getRoutingNodes().unassigned().iterator(); @@ -398,7 +395,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .build()).build(); clusterState = ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder() - .put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4")) + .add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4")) .localNodeId("node1").masterNodeId("node1")) .build(); final long nodeLeftTimestampNanos = System.nanoTime(); @@ -425,7 +422,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { // remove node that has replica and reroute clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()).remove(nodeIdOfFooReplica)).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "fake node left")).build(); + clusterState = ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build(); ClusterState stateWithDelayedShard = clusterState; // make sure the replica is marked as delayed (i.e. 
not reallocated) assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard)); @@ -469,7 +467,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { clusterState = ClusterState.builder(stateWithDelayedShard).nodes( DiscoveryNodes.builder(stateWithDelayedShard.nodes()).remove(nodeIdOfBarReplica)).build(); ClusterState stateWithShorterDelay = ClusterState.builder(clusterState).routingResult( - allocationService.reroute(clusterState, "fake node left")).build(); + allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build(); delayedAllocationService.setNanoTimeOverride(clusterChangeEventTimestampNanos); delayedAllocationService.clusterChanged( new ClusterChangedEvent("fake node left", stateWithShorterDelay, stateWithDelayedShard)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java index 32072282d6f..9e81f81e43b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java @@ -90,7 +90,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < this.numberOfReplicas + 1; i++) { - discoBuilder = discoBuilder.put(newNode("node" + i)); + discoBuilder = discoBuilder.add(newNode("node" + i)); } this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build(); RoutingAllocation.Result rerouteResult = allocationService.reroute(clusterState, "reroute"); @@ -161,7 +161,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase { final int newNodes = randomInt(10); logger.info("adding [{}] nodes", newNodes); for (int i = 0; i < newNodes; i++) { - nodesBuilder.put(newNode("extra_" + i)); + 
nodesBuilder.add(newNode("extra_" + i)); } this.clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); RoutingAllocation.Result rerouteResult = allocationService.reroute(this.clusterState, "nodes added"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 9da5e76ed1f..0f3ad8001c3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -83,7 +83,7 @@ public class RoutingTableTests extends ESAllocationTestCase { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < this.numberOfReplicas + 1; i++) { - discoBuilder = discoBuilder.put(newNode("node" + i)); + discoBuilder = discoBuilder.add(newNode("node" + i)); } this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build(); RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 0854d27e208..f35bd6d5597 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -26,18 +26,18 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.ByteBufferStreamInput; -import org.elasticsearch.snapshots.SnapshotId; import 
org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESAllocationTestCase; import java.io.IOException; @@ -175,7 +175,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index(index)).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // starting primaries clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build(); @@ -215,7 +215,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // starting primaries clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build(); @@ -224,7 +224,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build(); // verify that NODE_LEAVE is the reason for meta assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); @@ -244,7 +244,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = 
ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // starting primaries clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build(); @@ -294,7 +294,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries @@ -305,7 +305,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); // make sure both replicas are marked as delayed (i.e. 
not reallocated) - clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build(); assertThat(clusterState.prettyPrint(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2)); } @@ -322,7 +322,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries @@ -334,7 +334,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { final long baseTime = System.nanoTime(); allocation.setNanoTimeOverride(baseTime); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build(); final long delta = randomBoolean() ? 
0 : randomInt((int) expectMinDelaySettingsNanos - 1); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java index b3679208276..9cfac5da16b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java @@ -55,8 +55,8 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase { .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding three nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put( - newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add( + newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); @@ -83,7 +83,7 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove("node1")) .build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2)); @@ -92,7 +92,7 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove("node2").remove("node3")) .build(); - 
rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); // active allocation ids should not be updated diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index cf90379a50f..687343b16d9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -108,7 +108,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("now, start one more node, check that rebalancing will happen because we set it to always"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - nodes.put(newNode("node2")); + nodes.add(newNode("node2")); clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -178,7 +178,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("now, start one more node, check that rebalancing will happen because we set it to always"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - nodes.put(newNode("node2")); + nodes.add(newNode("node2")); clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -257,7 +257,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); for (int i = 0; i < 
numNodes; i++) { - nodes.put(newNode("node" + (i + nodeOffset))); + nodes.add(newNode("node" + (i + nodeOffset))); } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); @@ -304,7 +304,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("start {} nodes", numberOfNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { - nodes.put(newNode("node" + i)); + nodes.add(newNode("node" + i)); } ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -397,6 +397,8 @@ public class AddIncrementallyTests extends ESAllocationTestCase { } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + clusterState = ClusterState.builder(clusterState) + .routingResult(service.deassociateDeadNodes(clusterState, true, "reroute")).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); logger.info("start all the primary shards, replicas will start initializing"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 79880d9f4d2..ed91d98e532 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -51,11 +51,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; -import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.test.ESAllocationTestCase; 
-import java.util.Collections; - import static java.util.Collections.singleton; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -82,7 +79,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -141,10 +138,10 @@ public class AllocationCommandsTests extends ESAllocationTestCase { logger.info("--> adding 3 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) - .put(newNode("node4", singleton(DiscoveryNode.Role.MASTER))) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) + .add(newNode("node4", singleton(DiscoveryNode.Role.MASTER))) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -263,9 +260,9 @@ public class AllocationCommandsTests extends ESAllocationTestCase { logger.info("--> adding 3 nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) 
- .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java index 86f3bd5704b..9b5b8db7acf 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -67,7 +67,7 @@ public class AllocationPriorityTests extends ESAllocationTestCase { .build(); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index d78156b2217..13ade5265a6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -72,8 +72,8 @@ public class AwarenessAllocationTests extends 
ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -91,7 +91,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -111,7 +111,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -140,9 +140,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) - .put(newNode("node3", 
singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node3", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -160,7 +160,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "2"))) + .add(newNode("node4", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -180,7 +180,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5", singletonMap("rack_id", "3"))) + .add(newNode("node5", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -214,8 +214,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -239,7 +239,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -264,7 +264,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, some more relocation should happen"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -305,8 +305,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -324,7 +324,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -353,7 +353,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, some more relocation should happen"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -396,8 +396,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -415,7 +415,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); 
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -435,7 +435,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, we will have another relocation"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -474,10 +474,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) - .put(newNode("node3", singletonMap("rack_id", "1"))) - .put(newNode("node4", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node3", singletonMap("rack_id", "1"))) + .add(newNode("node4", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -495,7 +495,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5", singletonMap("rack_id", "2"))) + .add(newNode("node5", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -515,7 +515,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, we will have another relocation"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node6", singletonMap("rack_id", "3"))) + .add(newNode("node6", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -555,8 +555,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -572,7 +572,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -592,7 +592,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -622,9 +622,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) - .put(newNode("node3", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node3", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -640,7 +640,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "2"))) + .add(newNode("node4", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -660,7 +660,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5", singletonMap("rack_id", "3"))) + .add(newNode("node5", singletonMap("rack_id", "3"))) 
).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -697,8 +697,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -712,7 +712,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -736,7 +736,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, some more relocation should happen"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -776,8 +776,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes in 
different zones and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("A-0", singletonMap("zone", "a"))) - .put(newNode("B-0", singletonMap("zone", "b"))) + .add(newNode("A-0", singletonMap("zone", "a"))) + .add(newNode("B-0", singletonMap("zone", "b"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -798,7 +798,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node in zone 'a' and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("A-1", singletonMap("zone", "a"))) + .add(newNode("A-1", singletonMap("zone", "a"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -837,12 +837,12 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding 5 nodes in different zones and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("A-0", singletonMap("zone", "a"))) - .put(newNode("A-1", singletonMap("zone", "a"))) - .put(newNode("A-2", singletonMap("zone", "a"))) - .put(newNode("A-3", singletonMap("zone", "a"))) - .put(newNode("A-4", singletonMap("zone", "a"))) - .put(newNode("B-0", singletonMap("zone", "b"))) + .add(newNode("A-0", singletonMap("zone", "a"))) + .add(newNode("A-1", singletonMap("zone", "a"))) + .add(newNode("A-2", singletonMap("zone", "a"))) + .add(newNode("A-3", singletonMap("zone", "a"))) + .add(newNode("A-4", singletonMap("zone", "a"))) + .add(newNode("B-0", singletonMap("zone", "b"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 9d0b27eac1a..2e2eabf5063 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -46,6 +46,7 @@ import org.hamcrest.Matchers; import java.util.HashMap; import java.util.Map; + import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; @@ -129,7 +130,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { logger.info("start " + numberOfNodes + " nodes"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { - nodes.put(newNode("node" + i)); + nodes.add(newNode("node" + i)); } ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -165,7 +166,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { private ClusterState addNode(ClusterState clusterState, AllocationService strategy) { logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node" + numberOfNodes))) + .add(newNode("node" + numberOfNodes))) .build(); RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -191,11 +192,18 @@ public class BalanceConfigurationTests extends 
ESAllocationTestCase { logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + boolean removed = false; for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) { nodes.remove("node" + i); + removed = true; } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + if (removed) { + clusterState = ClusterState.builder(clusterState).routingResult( + strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes") + ).build(); + } RoutingNodes routingNodes = clusterState.getRoutingNodes(); logger.info("start all the primary shards, replicas will start initializing"); @@ -378,7 +386,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < 4; i++) { DiscoveryNode node = newNode("node" + i); - nodes.put(node); + nodes.add(node); } ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java index fe264e17378..8640868bd2d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -118,7 +118,7 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); DiscoveryNodes.Builder builderDiscoNodes = DiscoveryNodes.builder(); for (String node : nodes) { - builderDiscoNodes.put(newNode(node)); + builderDiscoNodes.add(newNode(node)); } ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).nodes(builderDiscoNodes.build()).build(); if (balanceFirst()) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index 458432ff78e..e52738fcc4d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -64,7 +64,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -121,7 +121,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -150,7 +150,7 @@ public class 
ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -226,7 +226,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -254,7 +254,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -311,7 +311,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -338,7 +338,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -433,7 +433,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -461,7 +461,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -518,7 +518,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -545,7 +545,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -621,7 +621,7 @@ public class 
ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -665,7 +665,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -686,7 +686,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.debug("now, start 1 more node, check that rebalancing will not happen since we unassigned shards"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node2"))) + .add(newNode("node2"))) .build(); logger.debug("reroute and check that nothing has changed"); RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute"); @@ -764,7 +764,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -785,7 +785,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.debug("now, start 1 more node, check that rebalancing will not happen since we have shard sync going on"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node2"))) + .add(newNode("node2"))) .build(); logger.debug("reroute and check that nothing has changed"); RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java index 5d8cdcb838f..6b330fa738c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java @@ -69,7 +69,7 @@ public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase { } logger.info("start two nodes and fully start the shards"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -93,7 +93,7 @@ public class ConcurrentRebalanceRoutingTests 
extends ESAllocationTestCase { logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10"))) + .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) .build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 856d8361779..914b6a5d916 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -60,8 +60,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -84,10 +84,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); String nodeIdRemaining = nodeIdToFail.equals("node1") ? 
"node2" : "node1"; clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode(nodeIdRemaining)) + .add(newNode(nodeIdRemaining)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().primary(), equalTo(true)); @@ -111,8 +111,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -133,7 +133,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding additional node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -158,10 +158,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode(origPrimaryNodeId)) - .put(newNode(origReplicaNodeId)) + .add(newNode(origPrimaryNodeId)) + .add(newNode(origReplicaNodeId)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); 
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED)); @@ -185,8 +185,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -207,7 +207,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding additional node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -232,10 +232,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node3")) - .put(newNode(origReplicaNodeId)) + .add(newNode("node3")) + .add(newNode(origReplicaNodeId)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java index e9f487b1e10..bcf5be90d9d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java @@ -143,7 +143,7 @@ public class DecisionsImpactOnClusterHealthTests extends ESAllocationTestCase { // any allocations on it final DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder(); for (int i = 0; i < numShards; i++) { - discoveryNodes.put(newNode("node" + i)); + discoveryNodes.add(newNode("node" + i)); } clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java index 83a4c17a631..f3aa1a26527 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java @@ -58,7 +58,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, 
"reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -82,7 +82,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2)); logger.info("Start another node and perform rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -100,7 +100,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId()); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("make sure all the primary shards are active"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 7d2ffb07ddf..b6a934b0666 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -80,7 +80,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -95,7 +95,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { assertEquals(1, clusterState.getRoutingNodes().unassigned().size()); logger.info("Add another one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -133,7 +133,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b5332eb237..2dae0c6c2d6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -57,7 +57,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start 4 nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -90,7 +90,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { ) .build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); 
routingNodes = clusterState.getRoutingNodes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 0b9e20b3578..e66e35635e7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -73,8 +73,8 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -95,7 +95,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> adding additional node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -163,7 +163,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable 
prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -241,7 +241,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding single node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -296,7 +296,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1); DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(); for (int i = 0; i < numberOfReplicas + 1; i++) { - nodeBuilder.put(newNode("node" + Integer.toString(i))); + nodeBuilder.add(newNode("node" + Integer.toString(i))); } clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) { @@ -362,7 +362,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + 
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -419,7 +419,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -461,7 +461,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { } logger.info("Adding third node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -510,7 +510,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); // add 4 nodes - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); @@ -552,7 +552,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); // add 4 nodes - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java index 0255e18e529..a1f5f92e0c7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java @@ -116,7 +116,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); routingTable = service.reroute(clusterState, "reroute", false).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java index ef0a589e50a..84b491dfa7b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java @@ -66,10 +66,10 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> adding four nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("tag1", "value1"))) - .put(newNode("node2", singletonMap("tag1", "value2"))) - .put(newNode("node3", singletonMap("tag1", "value3"))) - .put(newNode("node4", singletonMap("tag1", "value4"))) + 
.add(newNode("node1", singletonMap("tag1", "value1"))) + .add(newNode("node2", singletonMap("tag1", "value2"))) + .add(newNode("node3", singletonMap("tag1", "value3"))) + .add(newNode("node4", singletonMap("tag1", "value4"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -114,10 +114,10 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("tag1", "value1"))) - .put(newNode("node2", singletonMap("tag1", "value2"))) - .put(newNode("node3", singletonMap("tag1", "value3"))) - .put(newNode("node4", singletonMap("tag1", "value4"))) + .add(newNode("node1", singletonMap("tag1", "value1"))) + .add(newNode("node2", singletonMap("tag1", "value2"))) + .add(newNode("node3", singletonMap("tag1", "value3"))) + .add(newNode("node4", singletonMap("tag1", "value4"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -185,7 +185,7 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes and performing rerouting"); DiscoveryNode node1 = newNode("node1", singletonMap("tag1", "value1")); DiscoveryNode node2 = newNode("node2", singletonMap("tag1", "value2")); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(node1).put(node2)).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node1).add(node2)).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); 
assertThat(clusterState.getRoutingNodes().node(node1.getId()).numberOfShardsWithState(INITIALIZING), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index 79236ddd2b7..9b93e556b31 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -82,7 +82,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -211,7 +211,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { } logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -230,7 +230,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -294,7 +294,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -363,7 +363,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index a5acb11ce4b..133da3c005f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import 
org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; @@ -66,7 +64,7 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute", false).routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 9859cd4d570..9050a91222d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -105,7 +105,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } logger.info("start two nodes and fully start the shards"); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -145,7 +145,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", VersionUtils.getPreviousVersion()))) + .add(newNode("node3", VersionUtils.getPreviousVersion()))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -161,7 +161,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4"))) + .add(newNode("node4"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -230,7 +230,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } } for (DiscoveryNode node : nodes) { - nodesBuilder.put(node); + nodesBuilder.add(node); } clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); clusterState = stabilize(clusterState, service); @@ -267,29 +267,29 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue()); } clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("old0", VersionUtils.getPreviousVersion())) - .put(newNode("old1", VersionUtils.getPreviousVersion())) - 
.put(newNode("old2", VersionUtils.getPreviousVersion()))).build(); + .add(newNode("old0", VersionUtils.getPreviousVersion())) + .add(newNode("old1", VersionUtils.getPreviousVersion())) + .add(newNode("old2", VersionUtils.getPreviousVersion()))).build(); clusterState = stabilize(clusterState, service); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("old0", VersionUtils.getPreviousVersion())) - .put(newNode("old1", VersionUtils.getPreviousVersion())) - .put(newNode("new0"))).build(); + .add(newNode("old0", VersionUtils.getPreviousVersion())) + .add(newNode("old1", VersionUtils.getPreviousVersion())) + .add(newNode("new0"))).build(); clusterState = stabilize(clusterState, service); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node0", VersionUtils.getPreviousVersion())) - .put(newNode("new1")) - .put(newNode("new0"))).build(); + .add(newNode("node0", VersionUtils.getPreviousVersion())) + .add(newNode("new1")) + .add(newNode("new0"))).build(); clusterState = stabilize(clusterState, service); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("new2")) - .put(newNode("new1")) - .put(newNode("new0"))).build(); + .add(newNode("new2")) + .add(newNode("new1")) + .add(newNode("new0"))).build(); clusterState = stabilize(clusterState, service); routingTable = clusterState.routingTable(); @@ -334,7 +334,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build(); + .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build(); AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new 
NodeVersionAllocationDecider(Settings.EMPTY)}); AllocationService strategy = new MockAllocationService(Settings.EMPTY, allocationDeciders, @@ -365,7 +365,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()) - .nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build(); + .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build(); AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{ new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY), new NodeVersionAllocationDecider(Settings.EMPTY)}); @@ -383,7 +383,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { private ClusterState stabilize(ClusterState clusterState, AllocationService service) { logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint()); - RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + RoutingTable routingTable = service.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertRecoveryNodeVersions(routingNodes); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java index 3812a550169..9c24993a723 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java @@ -66,8 +66,8 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation logger.info("adding two nodes and performing rerouting till all are allocated"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("tag1", "value1"))) - .put(newNode("node2", singletonMap("tag1", "value2")))).build(); + .add(newNode("node1", singletonMap("tag1", "value1"))) + .add(newNode("node2", singletonMap("tag1", "value2")))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -92,7 +92,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation .build())) .build(); clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards); @@ -102,7 +102,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation logger.info("start node back up"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node1", singletonMap("tag1", "value1")))).build(); + .add(newNode("node1", singletonMap("tag1", "value1")))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index decf0ed0ea3..b9ac52c69a4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -62,7 +62,7 @@ public class PreferPrimaryAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting till all are allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index b5f1ab7235c..cca0a5345d0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -58,11 +58,11 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding 
two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(result).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); result = strategy.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(result).build(); @@ -77,9 +77,9 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).routingResult(result).build(); logger.info("Adding third node and reroute and kill first node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3")).remove("node1")).build(); RoutingTable prevRoutingTable = clusterState.routingTable(); - result = strategy.reroute(clusterState, "reroute"); + result = strategy.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(result).build(); routingNodes = clusterState.getRoutingNodes(); routingTable = clusterState.routingTable(); @@ -111,7 +111,7 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); @@ -131,9 +131,9 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1"; clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode(nodeIdRemaining)) + .add(newNode(nodeIdRemaining)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); routingNodes = clusterState.getRoutingNodes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index 264a009650e..609d8324561 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -61,7 +61,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes 
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -73,7 +73,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5)); logger.info("start another node, replica will start recovering form primary"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -81,7 +81,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5)); logger.info("start another node, make sure the primary is not relocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff 
--git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 7f62cced708..061aa901885 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -93,19 +93,25 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { int numNodes = scaledRandomIntBetween(1, 3); for (int j = 0; j < numNodes; j++) { logger.info("adding node [{}]", nodeIdCounter); - newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++))); + newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++))); } } + boolean nodesRemoved = false; if (nodeIdCounter > 1 && rarely()) { int nodeId = scaledRandomIntBetween(0, nodeIdCounter - 2); logger.info("removing node [{}]", nodeId); newNodesBuilder.remove("NODE_" + nodeId); + nodesRemoved = true; } stateBuilder.nodes(newNodesBuilder.build()); clusterState = stateBuilder.build(); - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + if (nodesRemoved) { + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); + } else { + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + } clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) { routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) @@ -119,7 +125,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); for (int j = 0; j < (maxNumReplicas - clusterState.nodes().getSize()); j++) { logger.info("adding node [{}]", nodeIdCounter); - 
newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++))); + newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++))); } stateBuilder.nodes(newNodesBuilder.build()); clusterState = stateBuilder.build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index 0d743da39f3..b1d83b767b0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -99,7 +99,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase { } logger.info("start two nodes and fully start the shards"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -126,7 +126,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase { logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10"))) + .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) .build(); prevRoutingTable = routingTable; routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index aab3c8d6c2b..440d651f77b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -68,7 +68,7 @@ public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index 4283cb8475c..cd31f75b50b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -63,7 +63,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + 
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -133,7 +133,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -141,7 +141,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -169,7 +169,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -226,7 +226,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -371,7 +371,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("kill one node"); IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.getRoutingNodes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index e09d9790651..331adcd1468 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -63,9 +63,9 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes with the same host"); clusterState = ClusterState.builder(clusterState).nodes( 
DiscoveryNodes.builder() - .put(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT)) - .put(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -82,7 +82,7 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> add another node, with a different host, replicas will be allocating"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index 29162aabb60..d50e44c48d9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -57,7 +57,7 @@ public class ShardVersioningTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 67fc93b600b..a90b88fa9df 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -63,7 +63,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -107,7 +107,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -170,7 +170,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -195,7 +195,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build(); logger.info("Add another one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index ad9c0b8eef5..1b4d35d44d7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -80,7 +80,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -112,7 +112,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); logger.info("Starting another node and making sure nothing changed"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -128,7 +128,7 @@ public class 
SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(routingTable != prevRoutingTable, equalTo(true)); @@ -139,7 +139,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2")); logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -181,7 +181,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); logger.info("Adding one node and rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -244,7 
+244,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); List nodes = new ArrayList<>(); for (int i = 0; i < (numberOfIndices / 2); i++) { - nodesBuilder.put(newNode("node" + i)); + nodesBuilder.add(newNode("node" + i)); } RoutingTable prevRoutingTable = routingTable; clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); @@ -282,7 +282,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change"); nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) { - nodesBuilder.put(newNode("node" + i)); + nodesBuilder.add(newNode("node" + i)); } prevRoutingTable = routingTable; clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); @@ -348,7 +348,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { logger.info("Starting 3 nodes and rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))) .build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -369,7 +369,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { logger.info("Start two more nodes, things should remain the same"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4")).put(newNode("node5"))) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4")).add(newNode("node5"))) .build(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff 
--git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index 5c791b954f9..0eb317d198b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -67,7 +67,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -84,7 +84,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue()); logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -134,7 +134,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(prevRoutingTable != routingTable, equalTo(true)); @@ -150,7 +150,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { logger.info("Start another node, backup shard should start initializing"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 4e32cd1e2cc..df169e3b893 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -51,7 +51,7 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { .build(); final Index index = indexMetaData.getIndex(); ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) 
.metaData(MetaData.builder().put(indexMetaData, false)); final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.INITIALIZING); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index becd61e6eb2..556c97a6ef4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -80,7 +80,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { } logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -99,7 +99,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { } logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -152,7 +152,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), 
equalTo(10)); logger.info("Add another node and perform rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 0760367445d..b94aec95ca0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -66,7 +66,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start one node, do reroute, only 3 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -125,7 +125,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); 
logger.info("start one node, do reroute, only 3 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -150,7 +150,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5)); logger.info("start another node, replicas should start being allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -193,7 +193,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start one node, do reroute, only 5 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0)); @@ -213,7 +213,7 @@ public class ThrottlingAllocationTests extends 
ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("start another 2 nodes, 5 shards should be relocating - at most 5 are allowed per node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2")).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2")).add(newNode("node3"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -256,7 +256,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start one node, do reroute, only 1 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -273,7 +273,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2)); logger.info("start one more node, first non-primary should start being allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, 
"reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -291,7 +291,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); logger.info("start one more node, initializing second non-primary"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -301,7 +301,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); logger.info("start one more node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 47142062809..103e9027386 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -70,7 +70,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { logger.info("Adding two nodes and 
performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -121,7 +121,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED)); logger.info("Add another node and start the added replica"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 87e5fd24a04..02a603fed63 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -123,8 +123,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, 
"reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -156,7 +156,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -244,7 +244,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node4"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4")) + .add(newNode("node4")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -324,8 +324,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node1 and node2 node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -395,7 +395,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -483,7 +483,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node4"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4")) + .add(newNode("node4")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -511,7 +511,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node5"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5")) + .add(newNode("node5")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -592,8 +592,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .routingTable(routingTable).build(); logger.info("--> adding node1"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters + .add(newNode("node1")) + .add(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -662,8 +662,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .routingTable(routingTable).build(); logger.info("--> adding node1"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters + .add(newNode("node1")) + .add(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters ).build(); routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -770,8 +770,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -791,7 +791,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); AllocationCommand relocate1 = new MoveAllocationCommand("test", 0, "node2", "node3"); @@ -852,7 +852,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { MASTER_DATA_ROLES, Version.CURRENT); DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", new LocalTransportAddress("2"), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -969,7 +969,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", new LocalTransportAddress("2"), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build(); + DiscoveryNodes discoveryNodes 
= DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) @@ -1035,7 +1035,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", new LocalTransportAddress("3"), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT); ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(discoveryNode3)).build(); + .add(discoveryNode3)).build(); firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED); secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", null, true, ShardRoutingState.RELOCATING); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 7ede869f0a6..80309004fff 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -121,8 +121,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(node_0) - .put(node_1) + .add(node_0) + .add(node_1) ).build(); // actual test -- after all that bloat :) @@ -186,8 +186,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { logger.info("--> 
adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(node_0) - .put(node_1) + .add(node_0) + .add(node_1) ).build(); // actual test -- after all that bloat :) @@ -317,7 +317,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { .metaData(metaData).routingTable(routingTableBuilder.build()).build(); AllocationService allocationService = createAllocationService(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); RoutingAllocation.Result result = allocationService.reroute(clusterState, "foo"); clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 53932eef390..e880b09806c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -72,8 +72,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -100,8 +100,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes do rerouting"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -134,8 +134,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -179,8 +179,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -198,9 +198,9 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) ).build(); ClusterState prevState = clusterState; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -279,8 +279,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -293,9 +293,9 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) ).build(); ClusterState prevState = clusterState; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 4fa6615ac45..5f7e8bbfa2b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -47,7 +47,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { .addAsNew(metaData.index("test")) .build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).localNodeId("node1").masterNodeId("node2").build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).localNodeId("node1").masterNodeId("node2").build(); ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1")).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); @@ -70,7 +70,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { .addAsNew(metaData.index("test")) 
.build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes) .metaData(metaData).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java index 9957a6d3603..a0b6f7f0400 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java @@ -50,7 +50,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase { .addAsNew(metaData.index("test_idx")) .build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(), + DiscoveryNodes nodes = DiscoveryNodes.builder().add(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes) diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index b8ebe9e6308..8dbda2838aa 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardShuffler; import 
org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -247,8 +246,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase { node2Attributes.put("rack_id", "rack_2"); node2Attributes.put("zone", "zone2"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", unmodifiableMap(node1Attributes))) - .put(newNode("node2", unmodifiableMap(node2Attributes))) + .add(newNode("node1", unmodifiableMap(node1Attributes))) + .add(newNode("node2", unmodifiableMap(node2Attributes))) .localNodeId("node1") ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -295,8 +294,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("fred", "node1", singletonMap("disk", "ebs"))) - .put(newNode("barney", "node2", singletonMap("disk", "ephemeral"))) + .add(newNode("fred", "node1", singletonMap("disk", "ebs"))) + .add(newNode("barney", "node2", singletonMap("disk", "ephemeral"))) .localNodeId("node1") ).build(); @@ -369,8 +368,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - 
.put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) .localNodeId("node1") ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -442,9 +441,9 @@ public class RoutingIteratorTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) .localNodeId("node1") ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index 895ae6638e6..b1b7749d88c 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -155,8 +155,8 @@ public class ZenFaultDetectionTests extends ESTestCase { private DiscoveryNodes buildNodesForA(boolean master) { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - builder.put(nodeA); - builder.put(nodeB); + builder.add(nodeA); + builder.add(nodeB); builder.localNodeId(nodeA.getId()); builder.masterNodeId(master ? nodeA.getId() : nodeB.getId()); return builder.build(); @@ -164,8 +164,8 @@ public class ZenFaultDetectionTests extends ESTestCase { private DiscoveryNodes buildNodesForB(boolean master) { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - builder.put(nodeA); - builder.put(nodeB); + builder.add(nodeA); + builder.add(nodeB); builder.localNodeId(nodeB.getId()); builder.masterNodeId(master ? 
nodeB.getId() : nodeA.getId()); return builder.build(); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 2a0410f272b..b71310e2f6e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -18,16 +18,22 @@ */ package org.elasticsearch.discovery.zen; +import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NotMasterException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -38,6 +44,7 @@ import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import 
org.elasticsearch.discovery.zen.membership.MembershipAction; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -51,6 +58,7 @@ import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -63,13 +71,21 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.shuffle; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.elasticsearch.test.ESAllocationTestCase.createAllocationService; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -101,7 +117,7 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master setState(clusterService, ClusterState.builder(clusterService.state()).nodes( DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId()))); - nodeJoinController = new NodeJoinController(clusterService, new 
NoopAllocationService(Settings.EMPTY), + nodeJoinController = new NodeJoinController(clusterService, createAllocationService(Settings.EMPTY), new ElectMasterService(Settings.EMPTY), new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); } @@ -412,7 +428,7 @@ public class NodeJoinControllerTests extends ESTestCase { final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); final DiscoveryNode other_node = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - nodesBuilder.put(other_node); + nodesBuilder.add(other_node); setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); state = clusterService.state(); @@ -524,60 +540,137 @@ public class NodeJoinControllerTests extends ESTestCase { } public void testRejectingJoinWithSameAddressButDifferentId() throws InterruptedException, ExecutionException { + addNodes(randomInt(5)); ClusterState state = clusterService.state(); - final DiscoveryNode other_node = new DiscoveryNode("other_node", state.nodes().getLocalNode().getAddress(), - emptyMap(), emptySet(), Version.CURRENT); + final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList())); + final DiscoveryNode other_node = new DiscoveryNode("other_node", existing.getAddress(), emptyMap(), emptySet(), Version.CURRENT); ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); assertThat(e.getMessage(), containsString("found existing node")); } - public void testRejectingJoinWithSameIdButDifferentAddress() throws InterruptedException, ExecutionException { + public void testRejectingJoinWithSameIdButDifferentNode() throws InterruptedException, ExecutionException { + addNodes(randomInt(5)); ClusterState state = clusterService.state(); - final DiscoveryNode other_node = new 
DiscoveryNode(state.nodes().getLocalNode().getId(), - new LocalTransportAddress(randomAsciiOfLength(20)), emptyMap(), emptySet(), Version.CURRENT); - - ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); - assertThat(e.getMessage(), containsString("found existing node")); - } - - public void testJoinWithSameIdSameAddressButDifferentMeta() throws InterruptedException, ExecutionException { - ClusterState state = clusterService.state(); - final DiscoveryNode localNode = state.nodes().getLocalNode(); + final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList())); final DiscoveryNode other_node = new DiscoveryNode( - randomBoolean() ? localNode.getName() : "other_name", - localNode.getId(), localNode.getAddress(), - randomBoolean() ? localNode.getAttributes() : Collections.singletonMap("attr", "other"), - randomBoolean() ? localNode.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), - randomBoolean() ? localNode.getVersion() : VersionUtils.randomVersion(random())); + randomBoolean() ? existing.getName() : "other_name", + existing.getId(), + randomBoolean() ? existing.getAddress() : LocalTransportAddress.buildUnique(), + randomBoolean() ? existing.getAttributes() : Collections.singletonMap("attr", "other"), + randomBoolean() ? existing.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), + randomBoolean() ? 
existing.getVersion() : VersionUtils.randomVersion(random())); - joinNode(other_node); - - assertThat(clusterService.localNode(), equalTo(other_node)); + ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); + assertThat(e.getMessage(), containsString("found existing node")); } - static class NoopAllocationService extends AllocationService { + public void testRejectingRestartedNodeJoinsBeforeProcessingNodeLeft() throws InterruptedException, ExecutionException { + addNodes(randomInt(5)); + ClusterState state = clusterService.state(); + final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList())); + joinNode(existing); // OK - public NoopAllocationService(Settings settings) { - super(settings, null, null, null, null); + final DiscoveryNode other_node = new DiscoveryNode(existing.getId(), existing.getAddress(), existing.getAttributes(), + existing.getRoles(), Version.CURRENT); + + ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); + assertThat(e.getMessage(), containsString("found existing node")); + } + + /** + * Tests tha node can become a master, even though the last cluster state it knows contains + * nodes that conflict with the joins it got and needs to become a master + */ + public void testElectionBasedOnConflictingNodes() throws InterruptedException, ExecutionException { + final DiscoveryNode masterNode = clusterService.localNode(); + final DiscoveryNode otherNode = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(), + EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); + // simulate master going down with stale nodes in it's cluster state (for example when min master nodes is set to 2) + // also add some shards to that node + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); + discoBuilder.masterNodeId(null); + 
discoBuilder.add(otherNode); + ClusterState.Builder stateBuilder = ClusterState.builder(clusterService.state()).nodes(discoBuilder); + if (randomBoolean()) { + IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + stateBuilder.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex()); + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + final ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + + final DiscoveryNode primaryNode = randomBoolean() ? masterNode : otherNode; + final DiscoveryNode replicaNode = primaryNode.equals(masterNode) ? otherNode : masterNode; + final boolean primaryStarted = randomBoolean(); + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, null, true, + primaryStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING, + primaryStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "getting there"))); + if (primaryStarted) { + boolean replicaStared = randomBoolean(); + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, null, false, + replicaStared ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING, + replicaStared ? 
null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "getting there"))); + } else { + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, null, false, + ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "life sucks"))); + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()); } - @Override - public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, - boolean withReroute) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); - } + setState(clusterService, stateBuilder.build()); - @Override - public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, - List failedShards) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); - } + final DiscoveryNode restartedNode = new DiscoveryNode(otherNode.getId(), + randomBoolean() ? 
otherNode.getAddress() : LocalTransportAddress.buildUnique(), otherNode.getAttributes(), + otherNode.getRoles(), Version.CURRENT); - @Override - protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); + nodeJoinController.startElectionContext(); + final SimpleFuture joinFuture = joinNodeAsync(restartedNode); + final CountDownLatch elected = new CountDownLatch(1); + nodeJoinController.waitToBeElectedAsMaster(1, TimeValue.timeValueHours(5), new NodeJoinController.ElectionCallback() { + @Override + public void onElectedAsMaster(ClusterState state) { + elected.countDown(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("failed to be elected as master", t); + throw new AssertionError("failed to be elected as master", t); + } + }); + + elected.await(); + + joinFuture.get(); // throw any exception + + final ClusterState finalState = clusterService.state(); + final DiscoveryNodes finalNodes = finalState.nodes(); + assertTrue(finalNodes.isLocalNodeElectedMaster()); + assertThat(finalNodes.getLocalNode(), equalTo(masterNode)); + assertThat(finalNodes.getSize(), equalTo(2)); + assertThat(finalNodes.get(restartedNode.getId()), equalTo(restartedNode)); + List activeShardsOnRestartedNode = + StreamSupport.stream(finalState.getRoutingNodes().node(restartedNode.getId()).spliterator(), false) + .filter(ShardRouting::active).collect(Collectors.toList()); + assertThat(activeShardsOnRestartedNode, empty()); + } + + + private void addNodes(int count) { + ClusterState state = clusterService.state(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); + for (int i = 0;i< count;i++) { + final DiscoveryNode node = new DiscoveryNode("node_" + state.nodes().getSize() + i, LocalTransportAddress.buildUnique(), + emptyMap(), new 
HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), Version.CURRENT); + nodesBuilder.add(node); } + setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); } protected void assertNodesInCurrentState(List expectedNodes) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java index 667ca6fbccb..35335a8ede4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; @@ -56,13 +55,13 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); final int nodes = randomIntBetween(2, 16); for (int i = 0; i < nodes; i++) { - builder.put(node(i)); + builder.add(node(i)); } final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(builder).build(); final DiscoveryNodes.Builder removeBuilder = DiscoveryNodes.builder(); for (int i = nodes; i < nodes + randomIntBetween(1, 16); i++) { - removeBuilder.put(node(i)); + removeBuilder.add(node(i)); } final List tasks = StreamSupport @@ -106,7 +105,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { boolean first = true; for (int i = 0; i < nodes; i++) { final DiscoveryNode node 
= node(i); - builder.put(node); + builder.add(node); if (first || randomBoolean()) { tasks.add(new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? "left" : "failed")); } @@ -134,7 +133,8 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { when(electMasterService.hasEnoughMasterNodes(any(Iterable.class))).thenReturn(true); final AllocationService allocationService = mock(AllocationService.class); - when(allocationService.reroute(any(ClusterState.class), any(String.class))).thenReturn(mock(RoutingAllocation.Result.class)); + when(allocationService.deassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class))) + .thenReturn(mock(RoutingAllocation.Result.class)); final BiFunction rejoin = (cs, r) -> { fail("rejoin should not be invoked"); @@ -158,7 +158,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { boolean first = true; for (int i = 0; i < nodes; i++) { final DiscoveryNode node = node(i); - builder.put(node); + builder.add(node); if (first || randomBoolean()) { tasks.add(new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? 
"left" : "failed")); } @@ -171,7 +171,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes())); verifyNoMoreInteractions(electMasterService); - verify(allocationService).reroute(eq(remainingNodesClusterState.get()), any(String.class)); + verify(allocationService).deassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class)); for (final ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task task : tasks) { assertNull(result.resultingState.nodes().get(task.node().getId())); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 3d0d9ddd8b1..6248df7370f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -210,7 +210,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assert node != null; DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes()) - .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), + .add(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), emptySet(), Version.CURRENT)).masterNodeId("abc"); ClusterState.Builder builder = ClusterState.builder(state); builder.nodes(nodes); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 9db83f48f0e..ba4c14c2058 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -50,9 +50,9 @@ public class ZenDiscoveryUnitTests extends ESTestCase { ClusterName clusterName = new ClusterName("abc"); DiscoveryNodes.Builder currentNodes = DiscoveryNodes.builder(); - 
currentNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); + currentNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); DiscoveryNodes.Builder newNodes = DiscoveryNodes.builder(); - newNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); + newNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); ClusterState.Builder currentState = ClusterState.builder(clusterName); currentState.nodes(currentNodes); @@ -70,7 +70,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentNodes = DiscoveryNodes.builder(); - currentNodes.masterNodeId("b").put(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); + currentNodes.masterNodeId("b").add(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); ; // version isn't taken into account, so randomize it to ensure this. 
if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java index 7b81c38f1a1..ea5779c33bb 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java @@ -92,7 +92,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingA.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleA.node).localNodeId("UZP_A").build(); + return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build(); } @Override @@ -106,7 +106,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingB.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleB.node).localNodeId("UZP_B").build(); + return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build(); } @Override @@ -126,7 +126,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingC.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleC.node).localNodeId("UZP_C").build(); + return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build(); } @Override @@ -140,7 +140,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingD.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleD.node).localNodeId("UZP_D").build(); + return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build(); } @Override diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java 
b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java index 42aa792c95f..9bb8bf801f1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java @@ -237,7 +237,7 @@ public class PendingClusterStatesQueueTests extends ESTestCase { ClusterState state = lastClusterStatePerMaster[masterIndex]; if (state == null) { state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(DiscoveryNodes.builder() - .put(new DiscoveryNode(masters[masterIndex], LocalTransportAddress.buildUnique(), + .add(new DiscoveryNode(masters[masterIndex], LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),Version.CURRENT)).masterNodeId(masters[masterIndex]).build() ).build(); } else { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 7d72fa5c4dc..b1658845afd 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -105,7 +105,7 @@ public class PublishClusterStateActionTests extends ESTestCase { this.service = service; this.listener = listener; this.logger = logger; - this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); + this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); } public MockNode setAsMaster() { @@ -260,7 +260,7 @@ public class PublishClusterStateActionTests extends ESTestCase { ClusterState clusterState = nodeA.clusterState; // cluster 
state update - add nodeB - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).put(nodeB.discoveryNode).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -286,7 +286,7 @@ public class PublishClusterStateActionTests extends ESTestCase { // cluster state update 3 - register node C previousClusterState = clusterState; - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).add(nodeC.discoveryNode).build(); clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); @@ -318,9 +318,9 @@ public class PublishClusterStateActionTests extends ESTestCase { // node B becomes the master and sends a version of the cluster state that goes back discoveryNodes = DiscoveryNodes.builder(discoveryNodes) - .put(nodeA.discoveryNode) - .put(nodeB.discoveryNode) - .put(nodeC.discoveryNode) + .add(nodeA.discoveryNode) + .add(nodeB.discoveryNode) + .add(nodeC.discoveryNode) .masterNodeId(nodeB.discoveryNode.getId()) .localNodeId(nodeB.discoveryNode.getId()) .build(); @@ -339,7 +339,7 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build(); + DiscoveryNodes discoveryNodes = 
DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -370,11 +370,11 @@ public class PublishClusterStateActionTests extends ESTestCase { }); // Initial cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); // cluster state update - add nodeB - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -408,7 +408,7 @@ public class PublishClusterStateActionTests extends ESTestCase { assertProperMetaDataForVersion(event.state().metaData(), event.state().version()); } }); - discoveryNodesBuilder.put(node.discoveryNode); + discoveryNodesBuilder.add(node.discoveryNode); } AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; @@ -447,7 +447,7 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state - 
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -488,14 +488,14 @@ public class PublishClusterStateActionTests extends ESTestCase { final int masterNodes = randomIntBetween(1, 10); MockNode master = createMockNode("master"); - DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode); + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(master.discoveryNode); for (int i = 1; i < masterNodes; i++) { - discoveryNodesBuilder.put(createMockNode("node" + i).discoveryNode); + discoveryNodesBuilder.add(createMockNode("node" + i).discoveryNode); } final int dataNodes = randomIntBetween(0, 5); final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(); for (int i = 0; i < dataNodes; i++) { - discoveryNodesBuilder.put(createMockNode("data_" + i, dataSettings).discoveryNode); + discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings).discoveryNode); } discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId()); DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); @@ -536,10 +536,10 @@ public class PublishClusterStateActionTests extends ESTestCase { } Collections.shuffle(Arrays.asList(nodeTypes), random()); - DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode); + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(master.discoveryNode); for (int i = 0; i < nodeTypes.length; i++) { final MockNode mockNode = 
createMockNode("node" + i); - discoveryNodesBuilder.put(mockNode.discoveryNode); + discoveryNodesBuilder.add(mockNode.discoveryNode); switch (nodeTypes[i]) { case 1: mockNode.action.errorOnSend.set(true); @@ -552,7 +552,7 @@ public class PublishClusterStateActionTests extends ESTestCase { final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter for (int i = 0; i < dataNodes; i++) { final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()); - discoveryNodesBuilder.put(mockNode.discoveryNode); + discoveryNodesBuilder.add(mockNode.discoveryNode); if (randomBoolean()) { // we really don't care - just chaos monkey mockNode.action.errorOnCommit.set(randomBoolean()); @@ -638,7 +638,7 @@ public class PublishClusterStateActionTests extends ESTestCase { try { MockNode otherNode = createMockNode("otherNode"); state = ClusterState.builder(node.clusterState).nodes( - DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build() + DiscoveryNodes.builder(node.nodes()).add(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build() ).incrementVersion().build(); node.action.validateIncomingState(state, node.clusterState); fail("node accepted state with existent but wrong local node"); @@ -729,7 +729,7 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode master = createMockNode("master", settings); MockNode node = createMockNode("node", settings); ClusterState state = ClusterState.builder(master.clusterState) - .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); + .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); for (int i = 0; i < 10; i++) { state = ClusterState.builder(state).incrementVersion().build(); diff --git 
a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java index 092e6eaff8a..043eaa2708f 100644 --- a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java @@ -71,7 +71,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testClose() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going @@ -93,7 +93,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testFullCircleSingleNodeSuccess() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going @@ -112,7 +112,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testFullCircleSingleNodeFailure() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); // add a failed response for node1 test.addSimulation(node1.getId(), failure1); @@ -144,7 +144,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testTwoNodesOnSetup() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); test.addSimulation(node1.getId(), response1); test.addSimulation(node2.getId(), response2); @@ -172,7 +172,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testTwoNodesOnSetupAndFailure() throws Exception { - DiscoveryNodes nodes = 
DiscoveryNodes.builder().put(node1).put(node2).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); test.addSimulation(node1.getId(), response1); test.addSimulation(node2.getId(), failure2); @@ -198,7 +198,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testTwoNodesAddedInBetween() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); // no fetched data, 2 requests still on going @@ -210,7 +210,7 @@ public class AsyncShardFetchTests extends ESTestCase { test.fireSimulationAndWait(node1.getId()); // now, add a second node to the nodes, it should add it to the ongoing requests - nodes = DiscoveryNodes.builder(nodes).put(node2).build(); + nodes = DiscoveryNodes.builder(nodes).add(node2).build(); test.addSimulation(node2.getId(), response2); // no fetch data, has a new node introduced fetchData = test.fetchData(nodes, emptySet()); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index f41b55a5ea5..a0e813663b8 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -166,8 +166,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { Set dataOnlyRoles = Collections.singleton(DiscoveryNode.Role.DATA); - return DiscoveryNodes.builder().put(newNode("node1", masterEligible ? MASTER_DATA_ROLES : dataOnlyRoles)) - .put(newNode("master_node", MASTER_DATA_ROLES)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node"); + return DiscoveryNodes.builder().add(newNode("node1", masterEligible ? 
MASTER_DATA_ROLES : dataOnlyRoles)) + .add(newNode("master_node", MASTER_DATA_ROLES)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node"); } public void assertState(ClusterChangedEvent event, diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index e86fa6f014b..f893f73433e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -367,7 +367,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } @@ -450,7 +450,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } @@ -468,7 +468,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - 
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false); boolean changed = testAllocator.allocateUnassigned(allocation); @@ -512,7 +512,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false); boolean changed = testAllocator.allocateUnassigned(allocation); @@ -556,7 +556,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTableBuilder.build()) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index dedd26d68b5..549b4e786cb 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -306,7 +306,7 @@ public class ReplicaShardAllocatorTests extends 
ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false); } @@ -328,7 +328,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 152be45d558..c3429edc39b 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -55,6 +55,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RandomAllocationDeciderTests; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import 
org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; @@ -96,6 +97,7 @@ import static org.mockito.Mockito.when; public class ClusterStateChanges extends AbstractComponent { + private final AllocationService allocationService; private final ClusterService clusterService; private final ShardStateAction.ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; private final ShardStateAction.ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; @@ -111,7 +113,7 @@ public class ClusterStateChanges extends AbstractComponent { public ClusterStateChanges() { super(Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build()); - final AllocationService allocationService = new AllocationService(settings, new AllocationDeciders(settings, + allocationService = new AllocationService(settings, new AllocationDeciders(settings, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings), new ReplicaAfterPrimaryActiveAllocationDecider(settings), new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))), @@ -204,6 +206,11 @@ public class ClusterStateChanges extends AbstractComponent { return execute(transportClusterRerouteAction, request, state); } + public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { + RoutingAllocation.Result rerouteResult = allocationService.deassociateDeadNodes(clusterState, reroute, reason); + return ClusterState.builder(clusterState).routingResult(rerouteResult).build(); + } + public ClusterState applyFailedShards(ClusterState clusterState, List failedShards) { List entries = failedShards.stream().map(failedShard -> new ShardStateAction.ShardEntry(failedShard.routingEntry.shardId(), failedShard.routingEntry.allocationId().getId(), diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java 
b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 4477974d118..44231f29ba3 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -294,7 +294,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice if (randomBoolean()) { // add node if (state.nodes().getSize() < 10) { - DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).put(createNode()).build(); + DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node leave updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); @@ -306,7 +306,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice if (discoveryNode.equals(state.nodes().getMasterNode()) == false) { DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); - state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node join + state = cluster.deassociateDeadNodes(state, true, "removed and added a node"); updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } } diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 2ad8ebb52f9..67fe440a292 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -125,7 +125,7 @@ public class RareClusterStateIT extends ESIntegTestCase { public ClusterState 
execute(ClusterState currentState) throws Exception { // inject a node ClusterState.Builder builder = ClusterState.builder(currentState); - builder.nodes(DiscoveryNodes.builder(currentState.nodes()).put(new DiscoveryNode("_non_existent", + builder.nodes(DiscoveryNodes.builder(currentState.nodes()).add(new DiscoveryNode("_non_existent", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT))); // open index @@ -154,12 +154,11 @@ public class RareClusterStateIT extends ESIntegTestCase { clusterService.submitStateUpdateTask("test-remove-injected-node", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { - // inject a node ClusterState.Builder builder = ClusterState.builder(currentState); builder.nodes(DiscoveryNodes.builder(currentState.nodes()).remove("_non_existent")); currentState = builder.build(); - RoutingAllocation.Result result = allocationService.reroute(currentState, "reroute"); + RoutingAllocation.Result result = allocationService.deassociateDeadNodes(currentState, true, "reroute"); return ClusterState.builder(currentState).routingResult(result).build(); } diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index 96af4ef3671..d2e603ffd42 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -141,7 +141,7 @@ public class IndicesStoreTests extends ESTestCase { ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz", + 
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); int localShardId = randomInt(numShards - 1); @@ -164,7 +164,7 @@ public class IndicesStoreTests extends ESTestCase { ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode)); + clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode)); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { String relocatingNodeId = randomBoolean() ? null : "def"; @@ -186,7 +186,7 @@ public class IndicesStoreTests extends ESTestCase { final Version nodeVersion = randomBoolean() ? 
CURRENT : randomVersion(random()); ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz", + clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), nodeVersion))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { @@ -209,9 +209,9 @@ public class IndicesStoreTests extends ESTestCase { final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random()); clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()) - .put(localNode) - .put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)) - .put(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test + .add(localNode) + .add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)) + .add(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test )); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { From 11e4b0168bf73cf10c1d5c3e81aef1036746ee06 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 3 Aug 2016 10:30:20 +0200 Subject: [PATCH 047/103] Throw parsing error if range query contains multiple fields Range Query, like many other queries, used to parse when the 
query refers to multiple fields and the last one would win. We rather throw an exception now instead. Closes #19547 --- .../index/query/RangeQueryBuilder.java | 4 ++++ .../index/query/RangeQueryBuilderTests.java | 24 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index bfb7fac6b37..8e1326b1988 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -318,6 +318,10 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i } else if (parseContext.isDeprecatedSetting(currentFieldName)) { // skip } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[range] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index b30ff61f6f8..34e71427cbd 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; @@ -519,4 
+520,27 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 15:11:26 +0200 Subject: [PATCH 048/103] Throw parsing error if prefix query contains multiple fields Prefix Query, like many other queries, used to parse when the query refers to multiple fields and the last one would win. We rather throw an exception now instead. Also added tests for short prefix quer variant. --- .../index/query/PrefixQueryBuilder.java | 17 +++--- .../index/query/PrefixQueryBuilderTests.java | 56 ++++++++++++++++--- 2 files changed, 57 insertions(+), 16 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index e1f679e0c8d..2ebd0dfc6b1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -64,7 +64,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder throw new IllegalArgumentException("field name is null or empty"); } if (value == null) { - throw new IllegalArgumentException("value cannot be null."); + throw new IllegalArgumentException("value cannot be null"); } this.fieldName = fieldName; this.value = value; @@ -120,7 +120,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder public static Optional fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - String fieldName = parser.currentName(); + String fieldName = null; String value = null; String rewrite = null; @@ -134,6 +134,10 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder } else if (parseContext.isDeprecatedSetting(currentFieldName)) { // skip } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[prefix] query doesn't support multiple fields, found [" + + fieldName + "] and [" 
+ currentFieldName + "]"); + } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -149,19 +153,16 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder rewrite = parser.textOrNull(); } else { throw new ParsingException(parser.getTokenLocation(), - "[regexp] query does not support [" + currentFieldName + "]"); + "[prefix] query does not support [" + currentFieldName + "]"); } } } } else { - fieldName = currentFieldName; - value = parser.textOrNull(); + fieldName = currentFieldName; + value = parser.textOrNull(); } } - if (value == null) { - throw new ParsingException(parser.getTokenLocation(), "No value specified for prefix query"); - } return Optional.of(new PrefixQueryBuilder(fieldName, value) .rewrite(rewrite) .boost(boost) diff --git a/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java index aba6bec096b..22ee7ef81f3 100644 --- a/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java @@ -23,9 +23,12 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.hamcrest.Matchers.equalTo; @@ -35,16 +38,32 @@ public class PrefixQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + PrefixQueryBuilder prefixQuery = randomPrefixQuery(); + String contentString = "{\n" + + " \"prefix\" : {\n" + + " \"" + 
prefixQuery.fieldName() + "\" : \"" + prefixQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, prefixQuery); + return alternateVersions; + } + + private static PrefixQueryBuilder randomPrefixQuery() { + String fieldName = randomBoolean() ? STRING_FIELD_NAME : randomAsciiOfLengthBetween(1, 10); + String value = randomAsciiOfLengthBetween(1, 10); + return new PrefixQueryBuilder(fieldName, value); + } + @Override protected void doAssertLuceneQuery(PrefixQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, instanceOf(PrefixQuery.class)); @@ -60,16 +79,16 @@ public class PrefixQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 18:29:37 +0200 Subject: [PATCH 049/103] Throw parsing error if regexp query contains multiple fields Regexp Query, like many other queries, used to parse even when the query referred to multiple fields and the last one would win. We rather throw an exception now instead. Also added test for short prefix query variant. 
--- .../index/query/RegexpQueryBuilder.java | 13 +++-- .../index/query/RegexpQueryBuilderTests.java | 54 ++++++++++++++++--- 2 files changed, 53 insertions(+), 14 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index 4a68cdecd99..7be77795e82 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -77,7 +77,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder throw new IllegalArgumentException("field name is null or empty"); } if (value == null) { - throw new IllegalArgumentException("value cannot be null."); + throw new IllegalArgumentException("value cannot be null"); } this.fieldName = fieldName; this.value = value; @@ -180,10 +180,8 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder public static Optional fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - - String fieldName = parser.currentName(); + String fieldName = null; String rewrite = null; - String value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; int flagsValue = RegexpQueryBuilder.DEFAULT_FLAGS_VALUE; @@ -197,6 +195,10 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder } else if (parseContext.isDeprecatedSetting(currentFieldName)) { // skip } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[regexp] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -233,9 +235,6 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder } } - if (value == null) { - throw 
new ParsingException(parser.getTokenLocation(), "No value specified for regexp query"); - } return Optional.of(new RegexpQueryBuilder(fieldName, value) .flags(flagsValue) .maxDeterminizedStates(maxDeterminizedStates) diff --git a/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java index 64a62fb766f..4e5dd65153a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java @@ -21,11 +21,14 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -34,11 +37,7 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase flags = new ArrayList<>(); int iter = randomInt(5); @@ -56,6 +55,26 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + RegexpQueryBuilder regexpQuery = randomRegexpQuery(); + String contentString = "{\n" + + " \"regexp\" : {\n" + + " \"" + regexpQuery.fieldName() + "\" : \"" + regexpQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, regexpQuery); + return alternateVersions; + } + + private static RegexpQueryBuilder randomRegexpQuery() { + // mapped or unmapped fields + String fieldName = randomBoolean() ? 
STRING_FIELD_NAME : randomAsciiOfLengthBetween(1, 10); + String value = randomAsciiOfLengthBetween(1, 10); + return new RegexpQueryBuilder(fieldName, value); + } + @Override protected void doAssertLuceneQuery(RegexpQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, instanceOf(RegexpQuery.class)); @@ -72,14 +91,14 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 18:44:23 +0200 Subject: [PATCH 050/103] Throw parsing error if wildcard query contains multiple fields Wildcard Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also added test for short prefix query variant and modified the parsing code to consume the whole query object. --- .../index/query/WildcardQueryBuilder.java | 69 ++++++++++--------- .../query/WildcardQueryBuilderTests.java | 61 +++++++++++++--- 2 files changed, 85 insertions(+), 45 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index ba223f5b1b6..105acf19a13 100644 --- a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -75,7 +75,7 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(parser.getTokenLocation(), "[wildcard] query malformed, no field"); - } - String fieldName = parser.currentName(); + String fieldName = null; String rewrite = null; - String value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; String queryName = null; - token = 
parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, WILDCARD_FIELD)) { - value = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { - value = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { - rewrite = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[wildcard] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); } else { - throw new ParsingException(parser.getTokenLocation(), - "[wildcard] query does not support [" + currentFieldName + "]"); + if (parseContext.getParseFieldMatcher().match(currentFieldName, WILDCARD_FIELD)) { + value = parser.text(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, 
VALUE_FIELD)) { + value = parser.text(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { + rewrite = parser.textOrNull(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "[wildcard] query does not support [" + currentFieldName + "]"); + } } } + } else { + fieldName = parser.currentName(); + value = parser.text(); } - parser.nextToken(); - } else { - value = parser.text(); - parser.nextToken(); } - if (value == null) { - throw new ParsingException(parser.getTokenLocation(), "No value specified for wildcard query"); - } return Optional.of(new WildcardQueryBuilder(fieldName, value) .rewrite(rewrite) .boost(boost) diff --git a/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java index 34a006bf8e3..b987c3b9a3d 100644 --- a/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java @@ -21,9 +21,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.WildcardQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -32,21 +35,36 @@ public class WildcardQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + WildcardQueryBuilder wildcardQuery = randomWildcardQuery(); + String 
contentString = "{\n" + + " \"wildcard\" : {\n" + + " \"" + wildcardQuery.fieldName() + "\" : \"" + wildcardQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, wildcardQuery); + return alternateVersions; + } + + private static WildcardQueryBuilder randomWildcardQuery() { + // mapped or unmapped field + String text = randomAsciiOfLengthBetween(1, 10); + if (randomBoolean()) { + return new WildcardQueryBuilder(STRING_FIELD_NAME, text); + } else { + return new WildcardQueryBuilder(randomAsciiOfLengthBetween(1, 10), text); + } + } + @Override protected void doAssertLuceneQuery(WildcardQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, instanceOf(WildcardQuery.class)); @@ -65,14 +83,14 @@ public class WildcardQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 18:57:18 +0200 Subject: [PATCH 051/103] Throw parsing error if match_phrase query contains multiple fields Match phrase Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also added test for short prefix query variant and modified the parsing code to consume the whole query object. 
--- .../index/query/MatchPhraseQueryBuilder.java | 82 +++++++++---------- .../query/MatchPhraseQueryBuilderTests.java | 43 +++++++++- 2 files changed, 78 insertions(+), 47 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index 399b036c7b5..c9667c98778 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -49,7 +50,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query malformed, no field"); - } - String fieldName = parser.currentName(); - + String fieldName = null; Object value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; String analyzer = null; int slop = MatchQuery.DEFAULT_PHRASE_SLOP; String queryName = null; - - token = parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, 
MatchQueryBuilder.QUERY_FIELD)) { - value = parser.objectText(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) { - analyzer = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) { - slop = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[match_phrase] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) { + value = parser.objectText(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) { + analyzer = parser.text(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) { + slop = parser.intValue(); + } else if 
(parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] query does not support [" + currentFieldName + "]"); + } } else { throw new ParsingException(parser.getTokenLocation(), - "[" + NAME + "] query does not support [" + currentFieldName + "]"); + "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); } - } else { - throw new ParsingException(parser.getTokenLocation(), - "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); } + } else { + fieldName = parser.currentName(); + value = parser.objectText(); } - parser.nextToken(); - } else { - value = parser.objectText(); - // move to the next token - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME - + "] query parsed in simplified form, with direct field name, " - + "but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); - } - } - - if (value == null) { - throw new ParsingException(parser.getTokenLocation(), "No text specified for text query"); } MatchPhraseQueryBuilder matchQuery = new MatchPhraseQueryBuilder(fieldName, value); diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java index d20bf655e2c..e7c2e3d8abb 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java @@ -24,10 +24,13 @@ import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.ParsingException; 
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.instanceOf; @@ -66,6 +69,20 @@ public class MatchPhraseQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + MatchPhraseQueryBuilder matchPhraseQuery = new MatchPhraseQueryBuilder(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + String contentString = "{\n" + + " \"match_phrase\" : {\n" + + " \"" + matchPhraseQuery.fieldName() + "\" : \"" + matchPhraseQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, matchPhraseQuery); + return alternateVersions; + } + @Override protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, notNullValue()); @@ -76,16 +93,16 @@ public class MatchPhraseQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 19:00:26 +0200 Subject: [PATCH 052/103] [TEST] check validation error messages in IdsQueryBuilderTests --- .../org/elasticsearch/index/query/IdsQueryBuilderTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index 4ad90edc8cb..1793623ce2f 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -98,14 +98,14 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase new IdsQueryBuilder((String[])null); fail("must be not null"); } catch(IllegalArgumentException e) { - //all good + assertEquals("[ids] types cannot be null", 
e.getMessage()); } try { new IdsQueryBuilder().addIds((String[])null); fail("must be not null"); } catch(IllegalArgumentException e) { - //all good + assertEquals("[ids] ids cannot be null", e.getMessage()); } } From ad8f5e7e4b8900117d8d6f0a4fe6994a30fc3b44 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 3 Aug 2016 19:11:54 +0200 Subject: [PATCH 053/103] Throw parsing error if geo_distance query contains multiple fields Geo distance Query, like many other queries, used to parse even when the query referred to multiple fields and the last one would win. We rather throw an exception now instead. --- .../index/query/GeoDistanceQueryBuilder.java | 5 ++++- .../query/GeoDistanceQueryBuilderTests.java | 21 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 92b30747a14..1233df11714 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -359,9 +359,12 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder field : { lat : 30, lon : 12 } String currentName = parser.currentName(); - assert currentFieldName != null; fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java index 387df7ac3ca..decdf50b168 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import 
org.apache.lucene.search.Query; import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery; import org.elasticsearch.Version; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; @@ -474,4 +475,24 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase failingQueryBuilder.toQuery(shardContext)); assertThat(e.getMessage(), containsString("failed to find geo_point field [unmapped]")); } + + public void testParseFailsWithMultipleFields() throws IOException { + String json = "{\n" + + " \"geo_distance\" : {\n" + + " \"point1\" : {\n" + + " \"lat\" : 30, \"lon\" : 12\n" + + " },\n" + + " \"point2\" : {\n" + + " \"lat\" : 30, \"lon\" : 12\n" + + " }\n" + + " }\n" + + "}"; + + try { + parseQuery(json); + fail("parseQuery should have failed"); + } catch(ParsingException e) { + assertEquals("[geo_distance] query doesn't support multiple fields, found [point1] and [point2]", e.getMessage()); + } + } } From f7b3dce4bcca78745f6af73e03ceb5009c9448b5 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 3 Aug 2016 19:19:45 +0200 Subject: [PATCH 054/103] Throw parsing error if match_phrase_prefix query contains multiple fields Match phrase prefix Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also added test for short prefix query variant and modified the parsing code to consume the whole query object. 
--- .../query/MatchPhrasePrefixQueryBuilder.java | 83 +++++++++---------- .../MatchPhrasePrefixQueryBuilderTests.java | 45 +++++++++- 2 files changed, 80 insertions(+), 48 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java index b53f10989dc..ecce9e66b10 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -192,62 +192,55 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query malformed, no field"); - } - String fieldName = parser.currentName(); - + String fieldName = null; Object value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; String analyzer = null; int slop = MatchQuery.DEFAULT_PHRASE_SLOP; int maxExpansion = FuzzyQuery.defaultMaxExpansions; String queryName = null; - - token = parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) { - value = parser.objectText(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) { - analyzer = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = 
parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchPhraseQueryBuilder.SLOP_FIELD)) { - slop = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { - maxExpansion = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[match_phrase_prefix] query doesn't support multiple " + + "fields, found [" + fieldName + "] and [" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) { + value = parser.objectText(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) { + analyzer = parser.text(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchPhraseQueryBuilder.SLOP_FIELD)) { + slop = parser.intValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { + maxExpansion = parser.intValue(); + } else if 
(parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] query does not support [" + currentFieldName + "]"); + } } else { throw new ParsingException(parser.getTokenLocation(), - "[" + NAME + "] query does not support [" + currentFieldName + "]"); + "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); } - } else { - throw new ParsingException(parser.getTokenLocation(), - "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); } + } else { + fieldName = parser.currentName(); + value = parser.objectText(); } - parser.nextToken(); - } else { - value = parser.objectText(); - // move to the next token - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME - + "] query parsed in simplified form, with direct field name, " - + "but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); - } - } - - if (value == null) { - throw new ParsingException(parser.getTokenLocation(), "No text specified for text query"); } MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder(fieldName, value); diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java index f5b2690db64..83a64ba8002 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java @@ -23,11 +23,15 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; +import 
org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; @@ -69,6 +73,20 @@ public class MatchPhrasePrefixQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + MatchPhrasePrefixQueryBuilder matchPhrasePrefixQuery = new MatchPhrasePrefixQueryBuilder(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + String contentString = "{\n" + + " \"match_phrase_prefix\" : {\n" + + " \"" + matchPhrasePrefixQuery.fieldName() + "\" : \"" + matchPhrasePrefixQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, matchPhrasePrefixQuery); + return alternateVersions; + } + @Override protected void doAssertLuceneQuery(MatchPhrasePrefixQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { @@ -81,16 +99,16 @@ public class MatchPhrasePrefixQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 19:30:37 +0200 Subject: [PATCH 055/103] Throw parsing error if match query contains multiple fields Match Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also added test for short prefix query variant and modified the parsing code to consume the whole query object. 
--- .../index/query/MatchQueryBuilder.java | 146 +++++++++--------- .../index/query/MatchQueryBuilderTests.java | 36 +++++ 2 files changed, 108 insertions(+), 74 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index 04fa9120a76..64d025adf71 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -510,13 +510,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { public static Optional fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(parser.getTokenLocation(), "[" + MatchQueryBuilder.NAME + "] query malformed, no field"); - } - String fieldName = parser.currentName(); - + String fieldName = null; MatchQuery.Type type = MatchQuery.Type.BOOLEAN; Object value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; @@ -533,80 +527,84 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { Float cutOffFrequency = null; ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY; String queryName = null; - - token = parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) { - value = parser.objectText(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { - String tStr = parser.text(); - if ("boolean".equals(tStr)) { - type = MatchQuery.Type.BOOLEAN; - } else 
if ("phrase".equals(tStr)) { - type = MatchQuery.Type.PHRASE; - } else if ("phrase_prefix".equals(tStr) || ("phrasePrefix".equals(tStr))) { - type = MatchQuery.Type.PHRASE_PREFIX; - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support type " + tStr); - } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) { - analyzer = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) { - slop = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) { - fuzziness = Fuzziness.parse(parser); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) { - prefixLength = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { - maxExpansion = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) { - operator = Operator.fromString(parser.text()); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) { - minimumShouldMatch = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) { - fuzzyRewrite = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) { - fuzzyTranspositions = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) { - lenient = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) { - cutOffFrequency = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, 
ZERO_TERMS_QUERY_FIELD)) { - String zeroTermsDocs = parser.text(); - if ("none".equalsIgnoreCase(zeroTermsDocs)) { - zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE; - } else if ("all".equalsIgnoreCase(zeroTermsDocs)) { - zeroTermsQuery = MatchQuery.ZeroTermsQuery.ALL; + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[match] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) { + value = parser.objectText(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { + String tStr = parser.text(); + if ("boolean".equals(tStr)) { + type = MatchQuery.Type.BOOLEAN; + } else if ("phrase".equals(tStr)) { + type = MatchQuery.Type.PHRASE; + } else if ("phrase_prefix".equals(tStr) || ("phrasePrefix".equals(tStr))) { + type = MatchQuery.Type.PHRASE_PREFIX; + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support type " + tStr); + } + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) { + analyzer = parser.text(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if 
(parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) { + slop = parser.intValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) { + fuzziness = Fuzziness.parse(parser); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) { + prefixLength = parser.intValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { + maxExpansion = parser.intValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) { + operator = Operator.fromString(parser.text()); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) { + minimumShouldMatch = parser.textOrNull(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) { + fuzzyRewrite = parser.textOrNull(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) { + fuzzyTranspositions = parser.booleanValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) { + lenient = parser.booleanValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) { + cutOffFrequency = parser.floatValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ZERO_TERMS_QUERY_FIELD)) { + String zeroTermsDocs = parser.text(); + if ("none".equalsIgnoreCase(zeroTermsDocs)) { + zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE; + } else if ("all".equalsIgnoreCase(zeroTermsDocs)) { + zeroTermsQuery = MatchQuery.ZeroTermsQuery.ALL; + } else { + throw new ParsingException(parser.getTokenLocation(), + "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); + } + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); } else { throw new 
ParsingException(parser.getTokenLocation(), - "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); + "[" + NAME + "] query does not support [" + currentFieldName + "]"); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), - "[" + NAME + "] query does not support [" + currentFieldName + "]"); + "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); } - } else { - throw new ParsingException(parser.getTokenLocation(), - "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); } - } - parser.nextToken(); - } else { - value = parser.objectText(); - // move to the next token - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - throw new ParsingException(parser.getTokenLocation(), "[match] query parsed in simplified form, with direct field name, " - + "but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); + } else { + fieldName = parser.currentName(); + value = parser.objectText(); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 6a9a07c59dd..8772b360633 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import 
org.elasticsearch.common.lucene.search.Queries; @@ -40,7 +41,9 @@ import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matcher; import java.io.IOException; +import java.util.HashMap; import java.util.Locale; +import java.util.Map; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.instanceOf; @@ -118,6 +121,19 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + MatchQueryBuilder matchQuery = new MatchQueryBuilder(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); + String contentString = "{\n" + + " \"match\" : {\n" + + " \"" + matchQuery.fieldName() + "\" : \"" + matchQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, matchQuery); + return alternateVersions; + } + @Override protected void doAssertLuceneQuery(MatchQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, notNullValue()); @@ -406,4 +422,24 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 19:44:56 +0200 Subject: [PATCH 056/103] Throw parsing error if common terms query contains multiple fields Common Terms Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also added test for short prefix query variant and modified the parsing code to consume the whole query object. 
--- .../index/query/CommonTermsQueryBuilder.java | 134 +++++++++--------- .../query/CommonTermsQueryBuilderTests.java | 41 +++++- 2 files changed, 105 insertions(+), 70 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index 982b5231b42..c2ad9983290 100644 --- a/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -102,7 +102,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query malformed, no field"); - } - String fieldName = parser.currentName(); + + String fieldName = null; Object text = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; String analyzer = null; @@ -280,78 +277,79 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + CommonTermsQueryBuilder commonTermsQuery = new CommonTermsQueryBuilder(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + String contentString = "{\n" + + " \"common\" : {\n" + + " \"" + commonTermsQuery.fieldName() + "\" : \"" + commonTermsQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, commonTermsQuery); + return alternateVersions; + } + @Override protected void doAssertLuceneQuery(CommonTermsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, instanceOf(ExtendedCommonTermsQuery.class)); @@ -98,14 +115,14 @@ public class CommonTermsQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 
3 Aug 2016 19:52:42 +0200 Subject: [PATCH 057/103] Throw parsing error if span_term query contains multiple fields Span term Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also modified the parsing code to consume the whole query object. --- .../index/query/SpanTermQueryBuilder.java | 66 +++++++++---------- .../query/SpanTermQueryBuilderTests.java | 22 +++++++ 2 files changed, 52 insertions(+), 36 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java index 978ed1be3fe..1a77c73b0cc 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java @@ -94,49 +94,43 @@ public class SpanTermQueryBuilder extends BaseTermQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, ParsingException { XContentParser parser = parseContext.parser(); - - XContentParser.Token token = parser.currentToken(); - if (token == XContentParser.Token.START_OBJECT) { - token = parser.nextToken(); - } - - assert token == XContentParser.Token.FIELD_NAME; - String fieldName = parser.currentName(); - - + String fieldName = null; Object value = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; String queryName = null; - token = parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) { - value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, BaseTermQueryBuilder.VALUE_FIELD)) { - 
value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[span_term] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); } else { - throw new ParsingException(parser.getTokenLocation(), - "[span_term] query does not support [" + currentFieldName + "]"); + if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) { + value = parser.objectBytes(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, BaseTermQueryBuilder.VALUE_FIELD)) { + value = parser.objectBytes(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "[span_term] query does not support [" + currentFieldName + "]"); + } } } + } else { + fieldName = parser.currentName(); + value = parser.objectBytes(); } - parser.nextToken(); - } else { - value = parser.objectBytes(); - // move to the next token - 
parser.nextToken(); - } - - if (value == null) { - throw new ParsingException(parser.getTokenLocation(), "No value specified for term query"); } SpanTermQueryBuilder result = new SpanTermQueryBuilder(fieldName, value); diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java index 546c8536ad9..da76dd15371 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.SpanTermQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.MappedFieldType; @@ -108,4 +109,25 @@ public class SpanTermQueryBuilderTests extends AbstractTermQueryTestCase Date: Wed, 3 Aug 2016 19:53:08 +0200 Subject: [PATCH 058/103] [TEST] check validation error messages in AbstractTermQueryTestCase --- .../elasticsearch/index/query/AbstractTermQueryTestCase.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractTermQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractTermQueryTestCase.java index a2eec493c9c..e510af9892c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractTermQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractTermQueryTestCase.java @@ -38,14 +38,14 @@ public abstract class AbstractTermQueryTestCase Date: Wed, 3 Aug 2016 19:55:31 +0200 Subject: [PATCH 059/103] [TEST] test that term query throws error when made against multiple fields --- .../index/query/TermQueryBuilderTests.java | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff 
--git a/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java index a632f3803dd..fbb2e67b621 100644 --- a/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java @@ -149,4 +149,24 @@ public class TermQueryBuilderTests extends AbstractTermQueryTestCase Date: Wed, 3 Aug 2016 20:01:21 +0200 Subject: [PATCH 060/103] Throw parsing error if fuzzy query contains multiple fields Fuzzy Query, like many other queries, used to parse even when the query referred to multiple fields and the first one would win. We rather throw an exception now instead. Also added test for short prefix query variant and modified the parsing code to consume the whole query object. --- .../index/query/FuzzyQueryBuilder.java | 88 +++++++++---------- .../index/query/FuzzyQueryBuilderTests.java | 43 ++++++++- 2 files changed, 82 insertions(+), 49 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index 5cf8275a076..aa0e6e2ff52 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -152,7 +152,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder i */ public FuzzyQueryBuilder(String fieldName, Object value) { if (Strings.isEmpty(fieldName)) { - throw new IllegalArgumentException("field name cannot be null or empty."); + throw new IllegalArgumentException("field name cannot be null or empty"); } if (value == null) { throw new IllegalArgumentException("query value cannot be null"); @@ -258,63 +258,59 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder i public static Optional fromXContent(QueryParseContext parseContext) throws IOException { XContentParser 
parser = parseContext.parser(); - - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query malformed, no field"); - } - - String fieldName = parser.currentName(); + String fieldName = null; Object value = null; - Fuzziness fuzziness = FuzzyQueryBuilder.DEFAULT_FUZZINESS; int prefixLength = FuzzyQueryBuilder.DEFAULT_PREFIX_LENGTH; int maxExpansions = FuzzyQueryBuilder.DEFAULT_MAX_EXPANSIONS; boolean transpositions = FuzzyQueryBuilder.DEFAULT_TRANSPOSITIONS; String rewrite = null; - String queryName = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; - - token = parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) { - value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { - value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) { - fuzziness = Fuzziness.parse(parser); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) { - prefixLength = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { - maxExpansions = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) { - transpositions = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { - rewrite = 
parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query doesn't support multiple fields, found [" + + fieldName + "] and [" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); } else { - throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query does not support [" + currentFieldName + "]"); + if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) { + value = parser.objectBytes(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { + value = parser.objectBytes(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) { + fuzziness = Fuzziness.parse(parser); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) { + prefixLength = parser.intValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { + maxExpansions = parser.intValue(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) { + transpositions = parser.booleanValue(); + } else if 
(parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { + rewrite = parser.textOrNull(); + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query does not support [" + currentFieldName + "]"); + } } } + } else { + fieldName = parser.currentName(); + value = parser.objectBytes(); } - parser.nextToken(); - } else { - value = parser.objectBytes(); - // move to the next token - parser.nextToken(); - } - - if (value == null) { - throw new ParsingException(parser.getTokenLocation(), "no value specified for fuzzy query"); } return Optional.of(new FuzzyQueryBuilder(fieldName, value) .fuzziness(fuzziness) diff --git a/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java index 92583403638..0330012ddf2 100644 --- a/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java @@ -23,11 +23,14 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -55,6 +58,19 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase getAlternateVersions() { + Map alternateVersions = new HashMap<>(); + FuzzyQueryBuilder fuzzyQuery = new FuzzyQueryBuilder(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); + String 
contentString = "{\n" + + " \"fuzzy\" : {\n" + + " \"" + fuzzyQuery.fieldName() + "\" : \"" + fuzzyQuery.value() + "\"\n" + + " }\n" + + "}"; + alternateVersions.put(contentString, fuzzyQuery); + return alternateVersions; + } + @Override protected void doAssertLuceneQuery(FuzzyQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { assertThat(query, instanceOf(FuzzyQuery.class)); @@ -65,21 +81,21 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 3 Aug 2016 20:55:00 +0200 Subject: [PATCH 061/103] Make query parsing stricter by requiring each parser to stop at END_OBJECT token Instead of being lenient in QueryParseContext#parseInnerQueryBuilder we check that the token where the parser stopped reading was END_OBJECT, and throw error otherwise. This is a best effort to verify that the parsers read a whole object rather than stepping out in the middle of it due to malformed queries. --- .../org/elasticsearch/index/query/QueryParseContext.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 478bcbc51d4..95fe0094bad 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -115,10 +115,11 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { @SuppressWarnings("unchecked") Optional result = (Optional) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher, parser.getTokenLocation()).fromXContent(this); - if (parser.currentToken() == XContentParser.Token.END_OBJECT) { - // if we are at END_OBJECT, move to the next one... 
- parser.nextToken(); + if (parser.currentToken() != XContentParser.Token.END_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), + "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]"); } + parser.nextToken(); return result; } From 6a5c44a271cf84ace43215da1e9c37627a2685c1 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 3 Aug 2016 21:04:44 +0200 Subject: [PATCH 062/103] fix line length in FuzzyQueryBuilder --- .../java/org/elasticsearch/index/query/FuzzyQueryBuilder.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index aa0e6e2ff52..31cce0885a1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -303,7 +303,8 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder i } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { queryName = parser.text(); } else { - throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query does not support [" + currentFieldName + "]"); + throw new ParsingException(parser.getTokenLocation(), + "[fuzzy] query does not support [" + currentFieldName + "]"); } } } From 841d5a210e12bc629336396893e18872d2b1f46f Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 17 Jun 2016 09:22:39 +0200 Subject: [PATCH 063/103] Update to Jackson 2.8.1 This commit updates Jackson to the 2.8.1 version, which is more strict when it comes to build objects. It also adds the snakeyaml dependency that was previously shaded in jackson libs. 
It also closes #18076 --- .../resources/checkstyle_suppressions.xml | 2 - buildSrc/version.properties | 3 +- .../licenses/jackson-core-2.7.1.jar.sha1 | 1 - .../licenses/jackson-core-2.8.1.jar.sha1 | 1 + core/build.gradle | 1 + .../xcontent/json/JsonXContentGenerator.java | 5 +- .../xcontent/json/JsonXContentParser.java | 5 - .../search/MultiSearchRequestTests.java | 61 ++++-- .../builder/XContentBuilderTests.java | 1 + .../mapper/core/TextFieldMapperTests.java | 5 +- .../string/SimpleStringMappingTests.java | 5 +- .../index/translog/TranslogTests.java | 15 +- .../nodesinfo/NodeInfoStreamingTests.java | 7 +- .../SignificantTermsSignificanceScoreIT.java | 89 ++++++--- .../bucket/histogram/ExtendedBoundsTests.java | 16 +- .../threadpool/ThreadPoolStatsTests.java | 12 +- .../licenses/jackson-core-2.7.1.jar.sha1 | 1 - .../licenses/jackson-core-2.8.1.jar.sha1 | 1 + .../jackson-dataformat-cbor-2.7.1.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.8.1.jar.sha1 | 1 + .../jackson-dataformat-smile-2.7.1.jar.sha1 | 1 - .../jackson-dataformat-smile-2.8.1.jar.sha1 | 1 + .../jackson-dataformat-yaml-2.7.1.jar.sha1 | 1 - .../jackson-dataformat-yaml-2.8.1.jar.sha1 | 1 + distribution/licenses/snakeyaml-1.15.jar.sha1 | 1 + distribution/licenses/snakeyaml-LICENSE.txt | 176 ++++++++++++++++++ distribution/licenses/snakeyaml-NOTICE.txt | 24 +++ .../50_multi_search_template.yaml | 2 +- 28 files changed, 374 insertions(+), 66 deletions(-) delete mode 100644 client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 create mode 100644 client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 delete mode 100644 distribution/licenses/jackson-core-2.7.1.jar.sha1 create mode 100644 distribution/licenses/jackson-core-2.8.1.jar.sha1 delete mode 100644 distribution/licenses/jackson-dataformat-cbor-2.7.1.jar.sha1 create mode 100644 distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 delete mode 100644 distribution/licenses/jackson-dataformat-smile-2.7.1.jar.sha1 create mode 100644 
distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 delete mode 100644 distribution/licenses/jackson-dataformat-yaml-2.7.1.jar.sha1 create mode 100644 distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 create mode 100644 distribution/licenses/snakeyaml-1.15.jar.sha1 create mode 100644 distribution/licenses/snakeyaml-LICENSE.txt create mode 100644 distribution/licenses/snakeyaml-NOTICE.txt diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 64a3cb29a54..86ec8544994 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -676,7 +676,6 @@ - @@ -1009,7 +1008,6 @@ - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9fb8d89e824..357e5379e19 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -4,7 +4,8 @@ lucene = 6.1.0 # optional dependencies spatial4j = 0.6 jts = 1.13 -jackson = 2.7.1 +jackson = 2.8.1 +snakeyaml = 1.15 log4j = 1.2.17 slf4j = 1.6.2 jna = 4.2.2 diff --git a/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 deleted file mode 100644 index 73831ed2d51..00000000000 --- a/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4127b62db028f981e81caa248953c0899d720f98 \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 new file mode 100644 index 00000000000..b92131d6fab --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.8.1.jar.sha1 @@ -0,0 +1 @@ +fd13b1c033741d48291315c6370f7d475a42dccf \ No newline at end of file diff --git a/core/build.gradle b/core/build.gradle index 2cf20b1c774..0e87c21757b 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -69,6 +69,7 @@ dependencies { compile 'org.joda:joda-convert:1.2' // json and yaml + compile 
"org.yaml:snakeyaml:${versions.snakeyaml}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index a7c541b84fc..dd95e0d1df5 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -24,6 +24,7 @@ import com.fasterxml.jackson.core.JsonStreamContext; import com.fasterxml.jackson.core.base.GeneratorBase; import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate; import com.fasterxml.jackson.core.io.SerializedString; +import com.fasterxml.jackson.core.json.JsonWriteContext; import com.fasterxml.jackson.core.util.DefaultIndenter; import com.fasterxml.jackson.core.util.DefaultPrettyPrinter; import org.elasticsearch.common.bytes.BytesReference; @@ -271,7 +272,9 @@ public class JsonXContentGenerator implements XContentGenerator { public void writeEndRaw() { assert base != null : "JsonGenerator should be of instance GeneratorBase but was: " + generator.getClass(); if (base != null) { - base.getOutputContext().writeValue(); + JsonStreamContext context = base.getOutputContext(); + assert (context instanceof JsonWriteContext) : "Expected an instance of JsonWriteContext but was: " + context.getClass(); + ((JsonWriteContext) context).writeValue(); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index fbdf66e73ee..5728e6035e6 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ 
b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -87,11 +87,6 @@ public class JsonXContentParser extends AbstractXContentParser { @Override public BytesRef utf8Bytes() throws IOException { - // Tentative workaround for https://github.com/elastic/elasticsearch/issues/8629 - // TODO: Remove this when we upgrade jackson to 2.6.x. - if (parser.getTextLength() == 0) { - return new BytesRef(); - } return new BytesRef(CharBuffer.wrap(parser.getTextCharacters(), parser.getTextOffset(), parser.getTextLength())); } diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index 690f49171a6..2eab8674bdd 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -44,19 +44,33 @@ import static org.hamcrest.Matchers.nullValue; public class MultiSearchRequestTests extends ESTestCase { public void testSimpleAdd() throws Exception { MultiSearchRequest request = parseMultiSearchRequest("/org/elasticsearch/action/search/simple-msearch1.json"); - assertThat(request.requests().size(), equalTo(8)); - assertThat(request.requests().get(0).indices()[0], equalTo("test")); - assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(request.requests().get(0).types().length, equalTo(0)); - assertThat(request.requests().get(1).indices()[0], equalTo("test")); - assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(request.requests().get(1).types()[0], equalTo("type1")); - assertThat(request.requests().get(2).indices()[0], equalTo("test")); - 
assertThat(request.requests().get(2).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(request.requests().get(3).indices()[0], equalTo("test")); - assertThat(request.requests().get(3).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); - assertThat(request.requests().get(4).indices()[0], equalTo("test")); - assertThat(request.requests().get(4).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().size(), + equalTo(8)); + assertThat(request.requests().get(0).indices()[0], + equalTo("test")); + assertThat(request.requests().get(0).indicesOptions(), + equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(0).types().length, + equalTo(0)); + assertThat(request.requests().get(1).indices()[0], + equalTo("test")); + assertThat(request.requests().get(1).indicesOptions(), + equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(1).types()[0], + equalTo("type1")); + assertThat(request.requests().get(2).indices()[0], + equalTo("test")); + assertThat(request.requests().get(2).indicesOptions(), + equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(3).indices()[0], + equalTo("test")); + assertThat(request.requests().get(3).indicesOptions(), + equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(4).indices()[0], + equalTo("test")); + assertThat(request.requests().get(4).indicesOptions(), + equalTo(IndicesOptions.fromOptions(true, 
false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(5).indices(), is(Strings.EMPTY_ARRAY)); assertThat(request.requests().get(5).types().length, equalTo(0)); assertThat(request.requests().get(6).indices(), is(Strings.EMPTY_ARRAY)); @@ -119,10 +133,27 @@ public class MultiSearchRequestTests extends ESTestCase { } public void testResponseErrorToXContent() throws IOException { - MultiSearchResponse response = new MultiSearchResponse(new MultiSearchResponse.Item[]{new MultiSearchResponse.Item(null, new IllegalStateException("foobar")), new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz"))}); + MultiSearchResponse response = new MultiSearchResponse( + new MultiSearchResponse.Item[]{ + new MultiSearchResponse.Item(null, new IllegalStateException("foobar")), + new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz")) + }); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); response.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals("\"responses\"[{\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"}],\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"},\"status\":500},{\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"}],\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"},\"status\":500}]", + builder.endObject(); + + assertEquals("{\"responses\":[" + + "{" + + "\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"}]," + + "\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"},\"status\":500" + + "}," + + "{" + + "\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"}]," + + "\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"},\"status\":500" + + "}" + + "]}", builder.string()); } diff --git 
a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index fe69fc1f05d..d0e095e8c65 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -356,6 +356,7 @@ public class XContentBuilderTests extends ESTestCase { public void testWriteFieldMapWithNullKeys() throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); try { + builder.startObject(); builder.field("map", Collections.singletonMap(null, "test")); fail("write map should have failed"); } catch(IllegalArgumentException e) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java index 002d06b7d60..0fd3b9aa359 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java @@ -319,9 +319,12 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject().endObject().string(); mapper = parser.parse("type", new CompressedXContent(mapping)); - XContentBuilder builder = XContentFactory.jsonBuilder(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"))); + builder.endObject(); + String mappingString = builder.string(); assertTrue(mappingString.contains("analyzer")); assertTrue(mappingString.contains("search_analyzer")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java 
b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 416774b1be4..f6dc921687d 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -337,9 +337,12 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject().endObject().string(); mapper = parser.parse("type", new CompressedXContent(mapping)); - XContentBuilder builder = XContentFactory.jsonBuilder(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"))); + builder.endObject(); + String mappingString = builder.string(); assertTrue(mappingString.contains("analyzer")); assertTrue(mappingString.contains("search_analyzer")); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index c9b3daa806a..12ac2910a4b 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -42,6 +42,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; @@ -351,10 +354,14 @@ public class TranslogTests extends ESTestCase { assertEquals(6, copy.estimatedNumberOfOperations()); 
assertEquals(431, copy.getTranslogSizeInBytes()); - assertEquals("\"translog\"{\n" + - " \"operations\" : 6,\n" + - " \"size_in_bytes\" : 431\n" + - "}", copy.toString().trim()); + + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.startObject(); + copy.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + assertEquals("{\"translog\":{\"operations\":6,\"size_in_bytes\":431}}", builder.string()); + } try { new TranslogStats(1, -1); diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 090517adfcd..7cd4e355218 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -109,9 +109,14 @@ public class NodeInfoStreamingTests extends ESTestCase { private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException { ToXContent.Params params = ToXContent.EMPTY_PARAMS; XContentBuilder param1Builder = jsonBuilder(); - XContentBuilder param2Builder = jsonBuilder(); + param1Builder.startObject(); param1.toXContent(param1Builder, params); + param1Builder.endObject(); + + XContentBuilder param2Builder = jsonBuilder(); + param2Builder.startObject(); param2.toXContent(param2Builder, params); + param2Builder.endObject(); assertThat(param1Builder.string(), equalTo(param2Builder.string())); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 1027a1097a1..512d42d4e79 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -35,6 +35,7 @@ import 
org.elasticsearch.script.NativeScriptFactory; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptNoParams; import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptWithParams; @@ -116,7 +117,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { .execute() .actionGet(); assertSearchResponse(response); - StringTerms classes = (StringTerms) response.getAggregations().get("class"); + StringTerms classes = response.getAggregations().get("class"); assertThat(classes.getBuckets().size(), equalTo(2)); for (Terms.Bucket classBucket : classes.getBuckets()) { Map aggs = classBucket.getAggregations().asMap(); @@ -246,7 +247,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { } public void testXContentResponse() throws Exception { - String type = false || randomBoolean() ? "text" : "long"; + String type = randomBoolean() ? 
"text" : "long"; String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; SharedSignificantTermsTestMethods.index01Docs(type, settings, this); SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) @@ -254,7 +255,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { .execute() .actionGet(); assertSearchResponse(response); - StringTerms classes = (StringTerms) response.getAggregations().get("class"); + StringTerms classes = response.getAggregations().get("class"); assertThat(classes.getBuckets().size(), equalTo(2)); for (Terms.Bucket classBucket : classes.getBuckets()) { Map aggs = classBucket.getAggregations().asMap(); @@ -267,13 +268,39 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { } XContentBuilder responseBuilder = XContentFactory.jsonBuilder(); + responseBuilder.startObject(); classes.toXContent(responseBuilder, null); - String result = null; - if (type.equals("long")) { - result = "\"class\"{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0,\"buckets\":[{\"key\":\"0\",\"doc_count\":4,\"sig_terms\":{\"doc_count\":4,\"buckets\":[{\"key\":0,\"doc_count\":4,\"score\":0.39999999999999997,\"bg_count\":5}]}},{\"key\":\"1\",\"doc_count\":3,\"sig_terms\":{\"doc_count\":3,\"buckets\":[{\"key\":1,\"doc_count\":3,\"score\":0.75,\"bg_count\":4}]}}]}"; - } else { - result = "\"class\"{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0,\"buckets\":[{\"key\":\"0\",\"doc_count\":4,\"sig_terms\":{\"doc_count\":4,\"buckets\":[{\"key\":\"0\",\"doc_count\":4,\"score\":0.39999999999999997,\"bg_count\":5}]}},{\"key\":\"1\",\"doc_count\":3,\"sig_terms\":{\"doc_count\":3,\"buckets\":[{\"key\":\"1\",\"doc_count\":3,\"score\":0.75,\"bg_count\":4}]}}]}"; - } + responseBuilder.endObject(); + + String result = "{\"class\":{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0," + + "\"buckets\":[" + + "{" + + "\"key\":\"0\"," + + "\"doc_count\":4," + + 
"\"sig_terms\":{" + + "\"doc_count\":4," + + "\"buckets\":[" + + "{" + + "\"key\":" + (type.equals("long") ? "0," : "\"0\",") + + "\"doc_count\":4," + + "\"score\":0.39999999999999997," + + "\"bg_count\":5" + + "}" + + "]" + + "}" + + "}," + + "{" + + "\"key\":\"1\"," + + "\"doc_count\":3," + + "\"sig_terms\":{" + + "\"doc_count\":3," + + "\"buckets\":[" + + "{" + + "\"key\":" + (type.equals("long") ? "1," : "\"1\",") + + "\"doc_count\":3," + + "\"score\":0.75," + + "\"bg_count\":4" + + "}]}}]}}"; assertThat(responseBuilder.string(), equalTo(result)); } @@ -309,7 +336,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { } indexRandom(true, false, indexRequestBuilderList); - SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) + client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) .addAggregation( terms("class") .field(CLASS_FIELD) @@ -334,7 +361,8 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { // 1. terms agg on class and significant terms // 2. 
filter buckets and set the background to the other class and set is_background false // both should yield exact same result - public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuristicExpectingSuperset, SignificanceHeuristic significanceHeuristicExpectingSeparateSets) throws Exception { + public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuristicExpectingSuperset, + SignificanceHeuristic significanceHeuristicExpectingSeparateSets) throws Exception { SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE) .addAggregation(terms("class") @@ -364,18 +392,25 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { .execute() .actionGet(); - SignificantTerms sigTerms0 = ((SignificantTerms) (((StringTerms) response1.getAggregations().get("class")).getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); + StringTerms classes = response1.getAggregations().get("class"); + + SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); assertThat(sigTerms0.getBuckets().size(), equalTo(2)); double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore(); double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore(); - SignificantTerms sigTerms1 = ((SignificantTerms) (((StringTerms) response1.getAggregations().get("class")).getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); + SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore(); double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore(); - double score00SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("0")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("0").getSignificanceScore(); - double 
score01SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("0")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("1").getSignificanceScore(); - double score10SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("1")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("0").getSignificanceScore(); - double score11SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("1")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("1").getSignificanceScore(); + Aggregations aggs = response2.getAggregations(); + + sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms"); + double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore(); + double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore(); + + sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms"); + double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore(); + double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore(); assertThat(score00Background, equalTo(score00SeparateSets)); assertThat(score01Background, equalTo(score01SeparateSets)); @@ -401,11 +436,15 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { .execute() .actionGet(); assertSearchResponse(response); - StringTerms classes = (StringTerms) response.getAggregations().get("class"); + StringTerms classes = response.getAggregations().get("class"); assertThat(classes.getBuckets().size(), equalTo(2)); Iterator classBuckets = classes.getBuckets().iterator(); - Collection classA = ((SignificantTerms) classBuckets.next().getAggregations().get("mySignificantTerms")).getBuckets(); - Iterator classBBucketIterator = ((SignificantTerms) 
classBuckets.next().getAggregations().get("mySignificantTerms")).getBuckets().iterator(); + + Aggregations aggregations = classBuckets.next().getAggregations(); + SignificantTerms sigTerms = aggregations.get("mySignificantTerms"); + + Collection classA = sigTerms.getBuckets(); + Iterator classBBucketIterator = sigTerms.getBuckets().iterator(); assertThat(classA.size(), greaterThan(0)); for (SignificantTerms.Bucket classABucket : classA) { SignificantTerms.Bucket classBBucket = classBBucketIterator.next(); @@ -462,8 +501,10 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { .actionGet(); assertSearchResponse(response); for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { - for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) { - assertThat(bucket.getSignificanceScore(), is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize())); + SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms"); + for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) { + assertThat(bucket.getSignificanceScore(), + is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize())); } } } @@ -478,9 +519,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { } else { script = new Script("native_significance_score_script_no_params", ScriptType.INLINE, "native", null); } - ScriptHeuristic scriptHeuristic = new ScriptHeuristic(script); - - return scriptHeuristic; + return new ScriptHeuristic(script); } private void indexRandomFrequencies01(String type) throws ExecutionException, InterruptedException { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java index 0950501b8ba..6b7e51cc90f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPoolStats; import org.joda.time.DateTimeZone; import org.joda.time.Instant; @@ -41,6 +42,7 @@ import java.io.IOException; import static java.lang.Math.max; import static java.lang.Math.min; +import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -149,9 +151,21 @@ public class ExtendedBoundsTests extends ESTestCase { ExtendedBounds orig = randomExtendedBounds(); try (XContentBuilder out = JsonXContent.contentBuilder()) { + out.startObject(); orig.toXContent(out, ToXContent.EMPTY_PARAMS); + out.endObject(); + try (XContentParser in = JsonXContent.jsonXContent.createParser(out.bytes())) { - in.nextToken(); + XContentParser.Token token = in.currentToken(); + assertNull(token); + + token = in.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + + token = in.nextToken(); + assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(in.currentName(), equalTo(ExtendedBounds.EXTENDED_BOUNDS_FIELD.getPreferredName())); + ExtendedBounds read = ExtendedBounds.PARSER.apply(in, () -> ParseFieldMatcher.STRICT); assertEquals(orig, read); } catch (Exception e) { diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java index 
fc0a8abac27..a7312f59406 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java @@ -71,9 +71,11 @@ public class ThreadPoolStatsTests extends ESTestCase { stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L)); stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L)); - + ThreadPoolStats threadPoolStats = new ThreadPoolStats(stats); try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os)) { - new ThreadPoolStats(stats).toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.startObject(); + threadPoolStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); } try (XContentParser parser = XContentType.JSON.xContent().createParser(os.bytes())) { @@ -81,7 +83,11 @@ public class ThreadPoolStatsTests extends ESTestCase { assertNull(token); token = parser.nextToken(); - assertThat(token, equalTo(XContentParser.Token.VALUE_STRING)); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), equalTo(ThreadPoolStats.Fields.THREAD_POOL)); token = parser.nextToken(); assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); diff --git a/distribution/licenses/jackson-core-2.7.1.jar.sha1 b/distribution/licenses/jackson-core-2.7.1.jar.sha1 deleted file mode 100644 index 73831ed2d51..00000000000 --- a/distribution/licenses/jackson-core-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4127b62db028f981e81caa248953c0899d720f98 \ No newline at end of file diff --git a/distribution/licenses/jackson-core-2.8.1.jar.sha1 b/distribution/licenses/jackson-core-2.8.1.jar.sha1 new file mode 100644 index 00000000000..b92131d6fab --- /dev/null +++ b/distribution/licenses/jackson-core-2.8.1.jar.sha1 @@ -0,0 +1 @@ +fd13b1c033741d48291315c6370f7d475a42dccf 
\ No newline at end of file diff --git a/distribution/licenses/jackson-dataformat-cbor-2.7.1.jar.sha1 b/distribution/licenses/jackson-dataformat-cbor-2.7.1.jar.sha1 deleted file mode 100644 index 19bb5e64610..00000000000 --- a/distribution/licenses/jackson-dataformat-cbor-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4282418817ad2be26ce18739461499eae679390f \ No newline at end of file diff --git a/distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 b/distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 new file mode 100644 index 00000000000..7f1609bfd85 --- /dev/null +++ b/distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 @@ -0,0 +1 @@ +3a6fb7e75c9972559a78cf5cfc5a48a41a13ea40 \ No newline at end of file diff --git a/distribution/licenses/jackson-dataformat-smile-2.7.1.jar.sha1 b/distribution/licenses/jackson-dataformat-smile-2.7.1.jar.sha1 deleted file mode 100644 index 45c78df1ba3..00000000000 --- a/distribution/licenses/jackson-dataformat-smile-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ccde45d574388371d2c4032d4b853e2d596777e \ No newline at end of file diff --git a/distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 b/distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 new file mode 100644 index 00000000000..114d656a388 --- /dev/null +++ b/distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 @@ -0,0 +1 @@ +005b73867bc12224946fc67fc8d49d9f5e698d7f \ No newline at end of file diff --git a/distribution/licenses/jackson-dataformat-yaml-2.7.1.jar.sha1 b/distribution/licenses/jackson-dataformat-yaml-2.7.1.jar.sha1 deleted file mode 100644 index 01b442577b1..00000000000 --- a/distribution/licenses/jackson-dataformat-yaml-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c5235a523b7d720b2b0e1b850ea14083e342b07 \ No newline at end of file diff --git a/distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 b/distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 new file mode 100644 index 
00000000000..32ce0f74344 --- /dev/null +++ b/distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 @@ -0,0 +1 @@ +eb63166c723b0b4b9fb5298fca232a2f6612ec34 \ No newline at end of file diff --git a/distribution/licenses/snakeyaml-1.15.jar.sha1 b/distribution/licenses/snakeyaml-1.15.jar.sha1 new file mode 100644 index 00000000000..48391d6d9e1 --- /dev/null +++ b/distribution/licenses/snakeyaml-1.15.jar.sha1 @@ -0,0 +1 @@ +3b132bea69e8ee099f416044970997bde80f4ea6 \ No newline at end of file diff --git a/distribution/licenses/snakeyaml-LICENSE.txt b/distribution/licenses/snakeyaml-LICENSE.txt new file mode 100644 index 00000000000..d9a10c0d8e8 --- /dev/null +++ b/distribution/licenses/snakeyaml-LICENSE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/distribution/licenses/snakeyaml-NOTICE.txt b/distribution/licenses/snakeyaml-NOTICE.txt new file mode 100644 index 00000000000..b51464eee1f --- /dev/null +++ b/distribution/licenses/snakeyaml-NOTICE.txt @@ -0,0 +1,24 @@ +***The art of simplicity is a puzzle of complexity.*** + +## Overview ## +[YAML](http://yaml.org) is a data serialization format designed for human readability and interaction with scripting languages. + +SnakeYAML is a YAML processor for the Java Virtual Machine. 
+ +## SnakeYAML features ## + +* a **complete** [YAML 1.1 processor](http://yaml.org/spec/1.1/current.html). In particular, SnakeYAML can parse all examples from the specification. +* Unicode support including UTF-8/UTF-16 input/output. +* high-level API for serializing and deserializing native Java objects. +* support for all types from the [YAML types repository](http://yaml.org/type/index.html). +* relatively sensible error messages. + +## Info ## + * [Changes](https://bitbucket.org/asomov/snakeyaml/wiki/Changes) + * [Documentation](https://bitbucket.org/asomov/snakeyaml/wiki/Documentation) + +## Contribute ## +* Mercurial DVCS is used to dance with the [source code](https://bitbucket.org/asomov/snakeyaml/src). +* If you find a bug in SnakeYAML, please [file a bug report](https://bitbucket.org/asomov/snakeyaml/issues?status=new&status=open). +* You may discuss SnakeYAML at +[the mailing list](http://groups.google.com/group/snakeyaml-core). \ No newline at end of file diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yaml index a22b101bf67..8b8ffcf8ae9 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yaml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/50_multi_search_template.yaml @@ -98,7 +98,7 @@ setup: query_type: "unknown" - match: { responses.0.hits.total: 2 } - - match: { responses.1.error.root_cause.0.type: json_parse_exception } + - match: { responses.1.error.root_cause.0.type: json_e_o_f_exception } - match: { responses.1.error.root_cause.0.reason: "/Unexpected.end.of.input/" } - match: { responses.2.hits.total: 1 } - match: { responses.3.error.root_cause.0.type: parsing_exception } From 7f0bd5609441a5e8dd596cf901e103dd31ee3d83 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 5 Aug 2016 
13:55:18 +0200 Subject: [PATCH 064/103] [TEST] use expectThrows wherever possible in query builder unit tests --- .../index/query/SimpleQueryStringBuilder.java | 4 +- .../query/AbstractTermQueryTestCase.java | 22 +--- .../index/query/BoolQueryBuilderTests.java | 28 +---- .../query/BoostingQueryBuilderTests.java | 25 +---- .../query/CommonTermsQueryBuilderTests.java | 31 ++--- .../query/ConstantScoreQueryBuilderTests.java | 24 +--- .../index/query/DisMaxQueryBuilderTests.java | 7 +- .../FieldMaskingSpanQueryBuilderTests.java | 24 +--- .../index/query/FuzzyQueryBuilderTests.java | 46 ++------ .../GeoBoundingBoxQueryBuilderTests.java | 52 +++------ .../query/GeoDistanceQueryBuilderTests.java | 106 +++++------------- .../query/GeoDistanceRangeQueryTests.java | 96 +++++----------- .../query/GeoPolygonQueryBuilderTests.java | 60 +++------- .../query/GeoShapeQueryBuilderTests.java | 83 +++++--------- .../query/GeohashCellQueryBuilderTests.java | 37 ++---- .../query/HasChildQueryBuilderTests.java | 19 +--- .../query/HasParentQueryBuilderTests.java | 8 +- .../index/query/IdsQueryBuilderTests.java | 55 +++------ .../index/query/IndicesQueryBuilderTests.java | 7 +- .../MatchPhrasePrefixQueryBuilderTests.java | 42 ++----- .../query/MatchPhraseQueryBuilderTests.java | 33 ++---- .../index/query/MatchQueryBuilderTests.java | 44 ++------ .../query/MoreLikeThisQueryBuilderTests.java | 37 ++---- .../query/MultiMatchQueryBuilderTests.java | 31 +---- .../index/query/PrefixQueryBuilderTests.java | 31 ++--- .../index/query/QueryShardContextTests.java | 8 +- .../query/QueryStringQueryBuilderTests.java | 32 +++--- .../index/query/RangeQueryBuilderTests.java | 82 ++++---------- .../index/query/RegexpQueryBuilderTests.java | 34 ++---- .../query/SimpleQueryStringBuilderTests.java | 40 ++----- .../SpanContainingQueryBuilderTests.java | 16 +-- .../query/SpanFirstQueryBuilderTests.java | 16 +-- .../query/SpanMultiTermQueryBuilderTests.java | 18 +-- .../index/query/SpanNotQueryBuilderTests.java 
| 39 ++----- .../query/SpanTermQueryBuilderTests.java | 14 +-- .../query/SpanWithinQueryBuilderTests.java | 16 +-- .../index/query/TermQueryBuilderTests.java | 27 ++--- .../index/query/TermsQueryBuilderTests.java | 102 +++++------------ .../index/query/TypeQueryBuilderTests.java | 7 +- .../query/WildcardQueryBuilderTests.java | 37 ++---- .../index/query/WrapperQueryBuilderTests.java | 55 ++------- .../FunctionScoreQueryBuilderTests.java | 8 +- .../test/AbstractQueryTestCase.java | 18 +-- 43 files changed, 399 insertions(+), 1122 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index c75313f6c44..f408c0f1473 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -186,7 +186,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder createQueryBuilder(null, term)); + assertEquals("field name is null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> createQueryBuilder("", term)); + assertEquals("field name is null or empty", e.getMessage()); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 3839254bf1d..5e911261171 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -163,30 +163,10 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase booleanQuery.must(null)); + expectThrows(IllegalArgumentException.class, () -> booleanQuery.mustNot(null)); + expectThrows(IllegalArgumentException.class, () -> booleanQuery.filter(null)); + expectThrows(IllegalArgumentException.class, () -> 
booleanQuery.should(null)); } // https://github.com/elastic/elasticsearch/issues/7240 diff --git a/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java index 343c6270746..86592847e95 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java @@ -54,26 +54,10 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase new BoostingQueryBuilder(null, new MatchAllQueryBuilder())); + expectThrows(IllegalArgumentException.class, () -> new BoostingQueryBuilder(new MatchAllQueryBuilder(), null)); + expectThrows(IllegalArgumentException.class, + () -> new BoostingQueryBuilder(new MatchAllQueryBuilder(), new MatchAllQueryBuilder()).negativeBoost(-1.0f)); } public void testFromJson() throws IOException { @@ -103,7 +87,6 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase new CommonTermsQueryBuilder(null, "text")); + assertEquals("field name is null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder("", "text")); + assertEquals("field name is null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder("fieldName", null)); + assertEquals("text cannot be null", e.getMessage()); } public void testFromJson() throws IOException { @@ -203,11 +192,7 @@ public class CommonTermsQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[common] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java index 86381c135a5..d716a553a5a 100644 --- 
a/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java @@ -61,12 +61,8 @@ public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase parseQuery(queryString)); + assertThat(e.getMessage(), containsString("requires a 'filter' element")); } /** @@ -77,12 +73,8 @@ public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase parseQuery(queryString)); + assertThat(e.getMessage(), containsString("accepts only one 'filter' element")); } /** @@ -93,12 +85,8 @@ public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase parseQuery(queryString)); + assertThat(e.getMessage(), containsString("unexpected token [START_ARRAY]")); } public void testIllegalArguments() { diff --git a/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java index ebd6446f80a..8fa8724ae3c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java @@ -102,12 +102,7 @@ public class DisMaxQueryBuilderTests extends AbstractQueryTestCase disMaxQuery.add(null)); } public void testToQueryInnerPrefixQuery() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java index 1939f0e6b44..e6cefdb67af 100644 --- a/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java @@ -56,24 +56,10 @@ public class FieldMaskingSpanQueryBuilderTests extends AbstractQueryTestCase new FieldMaskingSpanQueryBuilder(null, "maskedField")); + SpanQueryBuilder span = new 
SpanTermQueryBuilder("name", "value"); + expectThrows(IllegalArgumentException.class, () -> new FieldMaskingSpanQueryBuilder(span, null)); + expectThrows(IllegalArgumentException.class, () -> new FieldMaskingSpanQueryBuilder(span, "")); } public void testFromJson() throws IOException { @@ -93,10 +79,8 @@ public class FieldMaskingSpanQueryBuilderTests extends AbstractQueryTestCase new FuzzyQueryBuilder(null, "text")); + assertEquals("field name cannot be null or empty", e.getMessage()); - try { - new FuzzyQueryBuilder("", "text"); - fail("must not be empty"); - } catch (IllegalArgumentException e) { - assertEquals("field name cannot be null or empty", e.getMessage()); - } + e = expectThrows(IllegalArgumentException.class, () -> new FuzzyQueryBuilder("", "text")); + assertEquals("field name cannot be null or empty", e.getMessage()); - try { - new FuzzyQueryBuilder("field", null); - fail("must not be null"); - } catch (IllegalArgumentException e) { - assertEquals("query value cannot be null", e.getMessage()); - } + e = expectThrows(IllegalArgumentException.class, () -> new FuzzyQueryBuilder("field", null)); + assertEquals("query value cannot be null", e.getMessage()); } public void testUnsupportedFuzzinessForStringType() throws IOException { QueryShardContext context = createShardContext(); context.setAllowUnmappedFields(true); - FuzzyQueryBuilder fuzzyQueryBuilder = new FuzzyQueryBuilder(STRING_FIELD_NAME, "text"); fuzzyQueryBuilder.fuzziness(Fuzziness.build(randomFrom("a string which is not auto", "3h", "200s"))); - - try { - fuzzyQueryBuilder.toQuery(context); - fail("should have failed with NumberFormatException"); - } catch (NumberFormatException e) { - assertThat(e.getMessage(), Matchers.containsString("For input string")); - } + NumberFormatException e = expectThrows(NumberFormatException.class, () -> fuzzyQueryBuilder.toQuery(context)); + assertThat(e.getMessage(), containsString("For input string")); } public void testToQueryWithStringField() throws 
IOException { @@ -135,7 +117,6 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[fuzzy] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } - } diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 910de2cf5d9..c9f55fe9992 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -44,7 +44,6 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase { /** Randomly generate either NaN or one of the two infinity values. 
*/ @@ -104,22 +103,14 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase qb.type((GeoExecType) null)); + assertEquals("Type is not allowed to be null.", e.getMessage()); } public void testValidationNullTypeString() { GeoBoundingBoxQueryBuilder qb = new GeoBoundingBoxQueryBuilder("teststring"); - try { - qb.type((String) null); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("cannot parse type from null string")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> qb.type((String) null)); + assertEquals("cannot parse type from null string", e.getMessage()); } @Override @@ -130,27 +121,17 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase super.testToQuery()); + assertEquals("failed to find geo_point field [mapped_geo_point]", e.getMessage()); } public void testBrokenCoordinateCannotBeSet() { PointTester[] testers = { new TopTester(), new LeftTester(), new BottomTester(), new RightTester() }; - GeoBoundingBoxQueryBuilder builder = createTestQueryBuilder(); builder.setValidationMethod(GeoValidationMethod.STRICT); for (PointTester tester : testers) { - try { - tester.invalidateCoordinate(builder, true); - fail("expected exception for broken " + tester.getClass().getName() + " coordinate"); - } catch (IllegalArgumentException e) { - // expected - } + expectThrows(IllegalArgumentException.class, () -> tester.invalidateCoordinate(builder, true)); } } @@ -215,12 +196,9 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase builder.setCorners(bottom, left, top, right)); + assertThat(e.getMessage(), containsString("top is below bottom corner:")); } public void testTopBottomCanBeFlippedOnIgnoreMalformed() { @@ -482,7 +460,7 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase parseQuery(deprecatedJson)); + assertEquals("Deprecated field [geo_bbox] used, expected 
[geo_bounding_box] instead", e.getMessage()); } public void testFromJsonCoerceFails() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java index decdf50b168..91bb90dccb9 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java @@ -86,82 +86,41 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase new GeoDistanceQueryBuilder("")); + assertEquals("fieldName must not be null or empty", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> new GeoDistanceQueryBuilder((String) null)); + assertEquals("fieldName must not be null or empty", e.getMessage()); GeoDistanceQueryBuilder query = new GeoDistanceQueryBuilder("fieldName"); - try { - if (randomBoolean()) { - query.distance(""); - } else { - query.distance(null); - } - fail("must not be null or empty"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), equalTo("distance must not be null or empty")); - } + e = expectThrows(IllegalArgumentException.class, () -> query.distance("")); + assertEquals("distance must not be null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> query.distance(null)); + assertEquals("distance must not be null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> query.distance("", DistanceUnit.DEFAULT)); + assertEquals("distance must not be null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> query.distance(null, DistanceUnit.DEFAULT)); + assertEquals("distance must not be null or empty", e.getMessage()); - try { - if (randomBoolean()) { - query.distance("", DistanceUnit.DEFAULT); - } else { - query.distance(null, DistanceUnit.DEFAULT); - } - fail("distance 
must not be null or empty"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), equalTo("distance must not be null or empty")); - } + e = expectThrows(IllegalArgumentException.class, () -> query.distance("1", null)); + assertEquals("distance unit must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> query.distance(1, null)); + assertEquals("distance unit must not be null", e.getMessage()); - try { - if (randomBoolean()) { - query.distance("1", null); - } else { - query.distance(1, null); - } - fail("distance must not be null"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), equalTo("distance unit must not be null")); - } + e = expectThrows(IllegalArgumentException.class, () -> query.distance( + randomIntBetween(Integer.MIN_VALUE, 0), DistanceUnit.DEFAULT)); + assertEquals("distance must be greater than zero", e.getMessage()); - try { - query.distance(randomIntBetween(Integer.MIN_VALUE, 0), DistanceUnit.DEFAULT); - fail("distance must be greater than zero"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), equalTo("distance must be greater than zero")); - } + e = expectThrows(IllegalArgumentException.class, () -> query.geohash(null)); + assertEquals("geohash must not be null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> query.geohash("")); + assertEquals("geohash must not be null or empty", e.getMessage()); - try { - if (randomBoolean()) { - query.geohash(null); - } else { - query.geohash(""); - } - fail("geohash must not be null"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), equalTo("geohash must not be null or empty")); - } + e = expectThrows(IllegalArgumentException.class, () -> query.geoDistance(null)); + assertEquals("geoDistance must not be null", e.getMessage()); - try { - query.geoDistance(null); - fail("geodistance must not be null"); - } catch (IllegalArgumentException ex) { - 
assertThat(ex.getMessage(), equalTo("geoDistance must not be null")); - } - - try { - query.optimizeBbox(null); - fail("optimizeBbox must not be null"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), equalTo("optimizeBbox must not be null")); - } + e = expectThrows(IllegalArgumentException.class, () -> query.optimizeBbox(null)); + assertEquals("optimizeBbox must not be null", e.getMessage()); } /** @@ -487,12 +446,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[geo_distance] query doesn't support multiple fields, found [point1] and [point2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java index 3e10eda34e2..53f3e71a0fc 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java @@ -41,7 +41,6 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase { @@ -213,96 +212,57 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase new GeoDistanceRangeQueryBuilder(null, new GeoPoint())); + assertEquals("fieldName must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> new GeoDistanceRangeQueryBuilder("", new GeoPoint())); + assertEquals("fieldName must not be null", e.getMessage()); } public void testNoPoint() { - try { - if (randomBoolean()) { - new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (GeoPoint) null); - } else { - new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (String) null); - } - 
fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("point must not be null")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (GeoPoint) null)); + assertEquals("point must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, (String) null)); + assertEquals("point must not be null", e.getMessage()); } public void testInvalidFrom() { GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint()); - try { - if (randomBoolean()) { - builder.from((String) null); - } else { - builder.from((Number) null); - } - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("[from] must not be null")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.from((String) null)); + assertEquals("[from] must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> builder.from((Number) null)); + assertEquals("[from] must not be null", e.getMessage()); } public void testInvalidTo() { GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint()); - try { - if (randomBoolean()) { - builder.to((String) null); - } else { - builder.to((Number) null); - } - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("[to] must not be null")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.to((String) null)); + assertEquals("[to] must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> builder.to((Number) null)); + assertEquals("[to] must not be null", e.getMessage()); } public void 
testInvalidOptimizeBBox() { GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint()); - if (randomBoolean()) { - try { - builder.optimizeBbox(null); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("optimizeBbox must not be null")); - } - } else { - try { - builder.optimizeBbox("foo"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("optimizeBbox must be one of [none, memory, indexed]")); - } - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.optimizeBbox(null)); + assertEquals("optimizeBbox must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> builder.optimizeBbox("foo")); + assertEquals("optimizeBbox must be one of [none, memory, indexed]", e.getMessage()); } public void testInvalidGeoDistance() { GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint()); - try { - builder.geoDistance(null); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("geoDistance calculation mode must not be null")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.geoDistance(null)); + assertEquals("geoDistance calculation mode must not be null", e.getMessage()); } public void testInvalidDistanceUnit() { GeoDistanceRangeQueryBuilder builder = new GeoDistanceRangeQueryBuilder(GEO_POINT_FIELD_NAME, new GeoPoint()); - try { - builder.unit(null); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("distance unit must not be null")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.unit(null)); + assertEquals("distance unit must not be null", 
e.getMessage()); } public void testNestedRangeQuery() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java index e6fd5abd05e..9834ea1a1c3 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import com.vividsolutions.jts.geom.Coordinate; - import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery; @@ -39,6 +38,7 @@ import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; @@ -47,7 +47,6 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -144,25 +143,17 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase new GeoPolygonQueryBuilder(null, randomPolygon(5))); + assertEquals("fieldName must not be null", e.getMessage()); } public void testEmptyPolygon() { - try { - if (randomBoolean()) { - new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, new ArrayList()); - } else { - new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, null); - } - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("polygon must not be null or empty")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> 
new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, Collections.emptyList())); + assertEquals("polygon must not be null or empty", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, null)); + assertEquals("polygon must not be null or empty", e.getMessage()); } public void testInvalidClosedPolygon() { @@ -170,24 +161,18 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points)); + assertEquals("too few points defined for geo_polygon query", e.getMessage()); } public void testInvalidOpenPolygon() { List points = new ArrayList<>(); points.add(new GeoPoint(0, 90)); points.add(new GeoPoint(90, 90)); - try { - new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("too few points defined for geo_polygon query")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new GeoPolygonQueryBuilder(GEO_POINT_FIELD_NAME, points)); + assertEquals("too few points defined for geo_polygon query", e.getMessage()); } public void testDeprecatedXContent() throws IOException { @@ -205,12 +190,8 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertEquals("Deprecated field [normalize] used, replaced by [use validation_method instead]", e.getMessage()); } public void testParsingAndToQueryParsingExceptions() throws IOException { @@ -223,12 +204,7 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase parseQuery(query)); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 940dbb3242c..993e75724de 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java 
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import com.vividsolutions.jts.geom.Coordinate; - import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -50,7 +49,6 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase { @@ -156,70 +154,44 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase new GeoShapeQueryBuilder(null, shape)); + assertEquals("fieldName is required", e.getMessage()); } public void testNoShape() throws IOException { - try { - new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, (ShapeBuilder) null); - fail("exception expected"); - } catch (IllegalArgumentException e) { - // expected - } + expectThrows(IllegalArgumentException.class, () -> new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, null)); } public void testNoIndexedShape() throws IOException { - try { - new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, null, "type"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("either shapeBytes or indexedShapeId and indexedShapeType are required")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, null, "type")); + assertEquals("either shapeBytes or indexedShapeId and indexedShapeType are required", e.getMessage()); } public void testNoIndexedShapeType() throws IOException { - try { - new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, "id", null); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), 
is("indexedShapeType is required if indexedShapeId is specified")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, "id", null)); + assertEquals("indexedShapeType is required if indexedShapeId is specified", e.getMessage()); } public void testNoRelation() throws IOException { ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - try { - builder.relation(null); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("No Shape Relation defined")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.relation(null)); + assertEquals("No Shape Relation defined", e.getMessage()); } public void testInvalidRelation() throws IOException { ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - try { - builder.strategy(SpatialStrategy.TERM); - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)); - fail("Illegal combination of strategy and relation setting"); - } catch (IllegalArgumentException e) { - // okay - } - - try { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)); - builder.strategy(SpatialStrategy.TERM); - fail("Illegal combination of strategy and relation setting"); - } catch (IllegalArgumentException e) { - // okay - } + builder.strategy(SpatialStrategy.TERM); + expectThrows(IllegalArgumentException.class, () -> builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); + GeoShapeQueryBuilder builder2 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); + builder2.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)); + expectThrows(IllegalArgumentException.class, () -> 
builder2.strategy(SpatialStrategy.TERM)); + GeoShapeQueryBuilder builder3 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); + builder3.strategy(SpatialStrategy.TERM); + expectThrows(IllegalArgumentException.class, () -> builder3.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); } // see #3878 @@ -256,16 +228,15 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase query.toQuery(createShardContext())); + assertEquals("query must be rewritten first", e.getMessage()); + QueryBuilder rewrite = query.rewrite(createShardContext()); GeoShapeQueryBuilder geoShapeQueryBuilder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, indexedShapeToReturn); - geoShapeQueryBuilder.strategy(sqb.strategy()); - geoShapeQueryBuilder.relation(sqb.relation()); + geoShapeQueryBuilder.strategy(query.strategy()); + geoShapeQueryBuilder.relation(query.relation()); assertEquals(geoShapeQueryBuilder, rewrite); } diff --git a/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java index 208b0479209..dbccd700165 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeohashCellQueryBuilderTests.java @@ -39,7 +39,6 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class GeohashCellQueryBuilderTests extends AbstractQueryTestCase { @@ -92,39 +91,23 @@ public class GeohashCellQueryBuilderTests extends AbstractQueryTestCase } public void testNullField() { - try { - if (randomBoolean()) { - new Builder(null, new GeoPoint()); - } else { - new Builder("", new GeoPoint()); - } - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException 
e) { - assertThat(e.getMessage(), is("fieldName must not be null")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Builder(null, new GeoPoint())); + assertEquals("fieldName must not be null", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new Builder("", new GeoPoint())); + assertEquals("fieldName must not be null", e.getMessage()); } public void testNullGeoPoint() { - try { - if (randomBoolean()) { - new Builder(GEO_POINT_FIELD_NAME, (GeoPoint) null); - } else { - new Builder(GEO_POINT_FIELD_NAME, ""); - } - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("geohash or point must be defined")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Builder(GEO_POINT_FIELD_NAME, (GeoPoint) null)); + assertEquals("geohash or point must be defined", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new Builder(GEO_POINT_FIELD_NAME, "")); + assertEquals("geohash or point must be defined", e.getMessage()); } public void testInvalidPrecision() { GeohashCellQuery.Builder builder = new Builder(GEO_POINT_FIELD_NAME, new GeoPoint()); - try { - builder.precision(-1); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("precision must be greater than 0")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.precision(-1)); + assertThat(e.getMessage(), containsString("precision must be greater than 0")); } public void testLocationParsing() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java index d820491c436..4a06ba6d219 100644 --- a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.query; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -63,7 +62,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.startsWith; -import static org.hamcrest.Matchers.is; public class HasChildQueryBuilderTests extends AbstractQueryTestCase { protected static final String PARENT_TYPE = "parent"; @@ -367,24 +365,17 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase HasChildQueryBuilder.parseScoreMode(null)); + assertEquals("No score mode for child query [null] found", e.getMessage()); } /** * Failure should not change (and the value should never match anything...). 
*/ public void testThatUnrecognizedFromStringThrowsException() { - try { - HasChildQueryBuilder.parseScoreMode("unrecognized value"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("No score mode for child query [unrecognized value] found")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> HasChildQueryBuilder.parseScoreMode("unrecognized value")); + assertEquals("No score mode for child query [unrecognized value] found", e.getMessage()); } public void testIgnoreUnmapped() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java index 321521668ea..f4bd9e1ef02 100644 --- a/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java @@ -157,12 +157,8 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertEquals("Deprecated field [type] used, expected [parent_type] instead", e.getMessage()); HasParentQueryBuilder queryBuilder = (HasParentQueryBuilder) parseQuery(builder.string(), ParseFieldMatcher.EMPTY); assertEquals("foo", queryBuilder.type()); diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index 1793623ce2f..723509d775f 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -32,7 +32,6 @@ import java.io.IOException; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; public class IdsQueryBuilderTests extends AbstractQueryTestCase { 
/** @@ -40,12 +39,8 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase */ public void testIdsNotProvided() throws IOException { String noIdsFieldQuery = "{\"ids\" : { \"type\" : \"my_type\" }"; - try { - parseQuery(noIdsFieldQuery); - fail("Expected ParsingException"); - } catch (ParsingException e) { - assertThat(e.getMessage(), containsString("no ids values provided")); - } + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(noIdsFieldQuery)); + assertThat(e.getMessage(), containsString("no ids values provided")); } @Override @@ -94,30 +89,19 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase } public void testIllegalArguments() { - try { - new IdsQueryBuilder((String[])null); - fail("must be not null"); - } catch(IllegalArgumentException e) { - assertEquals("[ids] types cannot be null", e.getMessage()); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IdsQueryBuilder((String[]) null)); + assertEquals("[ids] types cannot be null", e.getMessage()); - try { - new IdsQueryBuilder().addIds((String[])null); - fail("must be not null"); - } catch(IllegalArgumentException e) { - assertEquals("[ids] ids cannot be null", e.getMessage()); - } + IdsQueryBuilder idsQueryBuilder = new IdsQueryBuilder(); + e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[])null)); + assertEquals("[ids] ids cannot be null", e.getMessage()); } // see #7686. 
public void testIdsQueryWithInvalidValues() throws Exception { String query = "{ \"ids\": { \"values\": [[1]] } }"; - try { - parseQuery(query); - fail("Expected ParsingException"); - } catch (ParsingException e) { - assertThat(e.getMessage(), is("Illegal value for id, expecting a string or number, got: START_ARRAY")); - } + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(query)); + assertEquals("Illegal value for id, expecting a string or number, got: START_ARRAY", e.getMessage()); } public void testFromJson() throws IOException { @@ -143,7 +127,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase IdsQueryBuilder testQuery = new IdsQueryBuilder(type); //single value type can also be called _type - String contentString = "{\n" + + final String contentString = "{\n" + " \"ids\" : {\n" + " \"_type\" : \"" + type + "\",\n" + " \"values\" : []\n" + @@ -153,15 +137,11 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY); assertEquals(testQuery, parsed); - try { - parseQuery(contentString); - fail("parse should have failed"); - } catch(IllegalArgumentException e) { - assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage()); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString)); + assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage()); //array of types can also be called type rather than types - contentString = "{\n" + + final String contentString2 = "{\n" + " \"ids\" : {\n" + " \"types\" : [\"" + type + "\"],\n" + " \"values\" : []\n" + @@ -169,11 +149,8 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase "}"; parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY); assertEquals(testQuery, parsed); - try { - parseQuery(contentString); - fail("parse should have 
failed"); - } catch(IllegalArgumentException e) { - assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage()); - } + + e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString2)); + assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java index 12527a927c4..d1c2dcee90c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java @@ -61,12 +61,7 @@ public class IndicesQueryBuilderTests extends AbstractQueryTestCase new IndicesQueryBuilder(null, "index")); expectThrows(IllegalArgumentException.class, () -> new IndicesQueryBuilder(new MatchAllQueryBuilder(), (String[]) null)); expectThrows(IllegalArgumentException.class, () -> new IndicesQueryBuilder(new MatchAllQueryBuilder(), new String[0])); diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java index 83a64ba8002..c6dae00b89b 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java @@ -97,39 +97,22 @@ public class MatchPhrasePrefixQueryBuilderTests extends AbstractQueryTestCase new MatchPhrasePrefixQueryBuilder(null, "value")); + assertEquals("[match_phrase_prefix] requires fieldName", e.getMessage()); - try { - new MatchPhrasePrefixQueryBuilder("fieldName", null); - fail("value must not be non-null"); - } catch (IllegalArgumentException ex) { - assertEquals("[match_phrase_prefix] requires query value", ex.getMessage()); - } + e = 
expectThrows(IllegalArgumentException.class, () -> new MatchPhrasePrefixQueryBuilder("fieldName", null)); + assertEquals("[match_phrase_prefix] requires query value", e.getMessage()); MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder("fieldName", "text"); - - try { - matchQuery.maxExpansions(-1); - fail("must not be positive"); - } catch (IllegalArgumentException ex) { - // expected - } + e = expectThrows(IllegalArgumentException.class, () -> matchQuery.maxExpansions(-1)); } public void testBadAnalyzer() throws IOException { MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder("fieldName", "text"); matchQuery.analyzer("bogusAnalyzer"); - try { - matchQuery.toQuery(createShardContext()); - fail("Expected QueryShardException"); - } catch (QueryShardException e) { - assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); - } + + QueryShardException e = expectThrows(QueryShardException.class, () -> matchQuery.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); } public void testPhrasePrefixMatchQuery() throws IOException { @@ -186,12 +169,7 @@ public class MatchPhrasePrefixQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[match_phrase_prefix] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java index e7c2e3d8abb..04716b16c4e 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java @@ -91,30 +91,18 @@ public class MatchPhraseQueryBuilderTests extends AbstractQueryTestCase new MatchPhraseQueryBuilder(null, "value")); + assertEquals("[match_phrase] requires 
fieldName", e.getMessage()); - try { - new MatchPhraseQueryBuilder("fieldName", null); - fail("value must not be non-null"); - } catch (IllegalArgumentException ex) { - assertEquals("[match_phrase] requires query value", ex.getMessage()); - } + e = expectThrows(IllegalArgumentException.class, () -> new MatchPhraseQueryBuilder("fieldName", null)); + assertEquals("[match_phrase] requires query value", e.getMessage()); } public void testBadAnalyzer() throws IOException { MatchPhraseQueryBuilder matchQuery = new MatchPhraseQueryBuilder("fieldName", "text"); matchQuery.analyzer("bogusAnalyzer"); - try { - matchQuery.toQuery(createShardContext()); - fail("Expected QueryShardException"); - } catch (QueryShardException e) { - assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); - } + QueryShardException e = expectThrows(QueryShardException.class, () -> matchQuery.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); } public void testPhraseMatchQuery() throws IOException { @@ -148,12 +136,7 @@ public class MatchPhraseQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[match_phrase] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 8772b360633..a5cb2fc61c7 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -313,13 +313,9 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); + assertThat(e.getMessage(), + containsString("Deprecated field [type] used, replaced by [match_phrase and match_phrase_prefix query]")); } public void testLegacyMatchPhraseQuery() 
throws IOException { @@ -350,13 +346,9 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); + assertThat(e.getMessage(), + containsString("Deprecated field [type] used, replaced by [match_phrase and match_phrase_prefix query]")); } public void testLegacyFuzzyMatchQuery() throws IOException { @@ -381,13 +373,8 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); + assertThat(e.getMessage(), containsString("Deprecated field [" + type + "] used, expected [match] instead")); } public void testFuzzinessOnNonStringField() throws Exception { @@ -415,10 +402,8 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase 0); MatchQueryBuilder query = new MatchQueryBuilder(GEO_POINT_FIELD_NAME, "2,3"); QueryShardContext context = createShardContext(); - QueryShardException e = expectThrows(QueryShardException.class, - () -> query.toQuery(context)); - assertEquals("Geo fields do not support exact searching, use dedicated geo queries instead: [mapped_geo_point]", - e.getMessage()); + QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); + assertEquals("Geo fields do not support exact searching, use dedicated geo queries instead: [mapped_geo_point]", e.getMessage()); query.lenient(true); query.toQuery(context); // no exception } @@ -434,12 +419,7 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[match] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index 3c5bfed86dd..291bb9f0bd6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -245,23 +245,16 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase new MoreLikeThisQueryBuilder(new String[0], new String[]{"likeText"}, null)); + assertThat(e.getMessage(), containsString("requires 'fields' to be specified")); } public void testValidateEmptyLike() { String[] likeTexts = randomBoolean() ? null : new String[0]; Item[] likeItems = randomBoolean() ? null : new Item[0]; - try { - new MoreLikeThisQueryBuilder(likeTexts, likeItems); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("requires either 'like' texts or items to be specified")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems)); + assertThat(e.getMessage(), containsString("requires either 'like' texts or items to be specified")); } public void testUnsupportedFields() throws IOException { @@ -269,12 +262,8 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase queryBuilder.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("more_like_this only supports text/keyword fields")); } public void testMoreLikeThisBuilder() throws Exception { @@ -337,7 +326,7 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase parseQuery(deprecatedJson)); + assertEquals("Deprecated field [mlt] used, expected [more_like_this] instead", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 193d82b4c8e..e96c99bdcf6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -154,33 +154,10 @@ public class 
MultiMatchQueryBuilderTests extends AbstractQueryTestCase new MultiMatchQueryBuilder(null, "field")); + expectThrows(IllegalArgumentException.class, () -> new MultiMatchQueryBuilder("value", (String[]) null)); + expectThrows(IllegalArgumentException.class, () -> new MultiMatchQueryBuilder("value", new String[]{""})); + expectThrows(IllegalArgumentException.class, () -> new MultiMatchQueryBuilder("value", "field").type(null)); } public void testToQueryBoost() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java index 22ee7ef81f3..87d3bb2ae83 100644 --- a/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java @@ -73,23 +73,13 @@ public class PrefixQueryBuilderTests extends AbstractQueryTestCase new PrefixQueryBuilder(null, "text")); + assertEquals("field name is null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new PrefixQueryBuilder("", "text")); + assertEquals("field name is null or empty", e.getMessage()); - try { - new PrefixQueryBuilder("field", null); - fail("value cannot be null"); - } catch (IllegalArgumentException e) { - assertEquals("value cannot be null", e.getMessage()); - } + e = expectThrows(IllegalArgumentException.class, () -> new PrefixQueryBuilder("field", null)); + assertEquals("value cannot be null", e.getMessage()); } public void testBlendedRewriteMethod() throws IOException { @@ -135,12 +125,7 @@ public class PrefixQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[prefix] query doesn't support multiple fields, found [user1] and [user2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java index 
9a4c9aece5c..e1a41df4da5 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java @@ -54,12 +54,8 @@ public class QueryShardContextTests extends ESTestCase { MappedFieldType fieldType = new TextFieldMapper.TextFieldType(); MappedFieldType result = context.failIfFieldMappingNotFound("name", fieldType); assertThat(result, sameInstance(fieldType)); - try { - context.failIfFieldMappingNotFound("name", null); - fail("exception expected"); - } catch (QueryShardException e) { - assertThat(e.getMessage(), equalTo("No field mapping can be found for the field with name [name]")); - } + QueryShardException e = expectThrows(QueryShardException.class, () -> context.failIfFieldMappingNotFound("name", null)); + assertEquals("No field mapping can be found for the field with name [name]", e.getMessage()); context.setAllowUnmappedFields(true); result = context.failIfFieldMappingNotFound("name", fieldType); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 4996794775c..be77ba00734 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -382,13 +382,12 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase 0); - try { - queryStringQuery("/[ac]*a[ac]{50,200}/").defaultField(STRING_FIELD_NAME).toQuery(createShardContext()); - fail("Expected TooComplexToDeterminizeException"); - } catch (TooComplexToDeterminizeException e) { - assertThat(e.getMessage(), containsString("Determinizing [ac]*")); - assertThat(e.getMessage(), containsString("would result in more than 10000 states")); - } + QueryStringQueryBuilder queryBuilder = 
queryStringQuery("/[ac]*a[ac]{50,200}/").defaultField(STRING_FIELD_NAME); + + TooComplexToDeterminizeException e = expectThrows(TooComplexToDeterminizeException.class, + () -> queryBuilder.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("Determinizing [ac]*")); + assertThat(e.getMessage(), containsString("would result in more than 10000 states")); } public void testFuzzyNumeric() throws Exception { @@ -440,18 +439,13 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase parseQuery(invalidQueryAsString)); } public void testToQueryBooleanQueryMultipleBoosts() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 34e71427cbd..b4fda42177e 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -172,27 +172,10 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase new RangeQueryBuilder("")); RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("test"); - try { - if (randomBoolean()) { - rangeQueryBuilder.timeZone(null); - } else { - rangeQueryBuilder.timeZone("badID"); - } - fail("cannot be null or unknown id"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - if (randomBoolean()) { - rangeQueryBuilder.format(null); - } else { - rangeQueryBuilder.format("badFormat"); - } - fail("cannot be null or bad format"); - } catch (IllegalArgumentException e) { - // expected - } + expectThrows(IllegalArgumentException.class, () -> rangeQueryBuilder.timeZone(null)); + expectThrows(IllegalArgumentException.class, () -> rangeQueryBuilder.timeZone("badID")); + expectThrows(IllegalArgumentException.class, () -> rangeQueryBuilder.format(null)); + expectThrows(IllegalArgumentException.class, () -> rangeQueryBuilder.format("badFormat")); } /** 
@@ -201,12 +184,8 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase query.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("[range] time_zone can not be applied")); } /** @@ -215,12 +194,8 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase query.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("[range] time_zone can not be applied")); } public void testToQueryNumericField() throws IOException { @@ -271,7 +246,7 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase rewrittenQuery.rewrite(null)); } public void testDateRangeBoundaries() throws IOException { @@ -383,12 +354,8 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase queryBuilder.toQuery(createShardContext())); } public void testFromJson() throws IOException { @@ -427,7 +394,7 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase parseQuery(deprecatedJson, ParseFieldMatcher.STRICT)); + assertEquals("Deprecated field [_name] used, replaced by [query name is not supported in short version of range query]", + e.getMessage()); } public void testRewriteDateToMatchAll() throws IOException { @@ -461,8 +426,6 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[range] query doesn't support multiple fields, found [age] and [price]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java index 4e5dd65153a..41cfc2c63ff 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java @@ -83,23 +83,13 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase new RegexpQueryBuilder(null, "text")); + assertEquals("field name is null or empty", e.getMessage()); + e = 
expectThrows(IllegalArgumentException.class, () -> new RegexpQueryBuilder("", "text")); + assertEquals("field name is null or empty", e.getMessage()); - try { - new RegexpQueryBuilder("field", null); - fail("cannot be null or empty"); - } catch (IllegalArgumentException e) { - assertEquals("value cannot be null", e.getMessage()); - } + e = expectThrows(IllegalArgumentException.class, () -> new RegexpQueryBuilder("field", null)); + assertEquals("value cannot be null", e.getMessage()); } public void testFromJson() throws IOException { @@ -126,8 +116,7 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase 0); RegexpQueryBuilder query = new RegexpQueryBuilder(INT_FIELD_NAME, "12"); QueryShardContext context = createShardContext(); - QueryShardException e = expectThrows(QueryShardException.class, - () -> query.toQuery(context)); + QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); assertEquals("Can only use regexp queries on keyword and text fields - not on [mapped_int] which is of type [integer]", e.getMessage()); } @@ -144,12 +133,7 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[regexp] query doesn't support multiple fields, found [user1] and [user2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 9168b489eb2..93fbcfd930f 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -179,42 +179,26 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase qb.field(null)); + assertEquals("supplied field is null or empty", e.getMessage()); } public void testFieldCannotBeNullAndWeighted() { SimpleQueryStringBuilder qb = createTestQueryBuilder(); - 
try { - qb.field(null, AbstractQueryBuilder.DEFAULT_BOOST); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("supplied field is null or empty.")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> qb.field(null, AbstractQueryBuilder.DEFAULT_BOOST)); + assertEquals("supplied field is null or empty", e.getMessage()); } public void testFieldCannotBeEmpty() { SimpleQueryStringBuilder qb = createTestQueryBuilder(); - try { - qb.field(""); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("supplied field is null or empty.")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> qb.field("")); + assertEquals("supplied field is null or empty", e.getMessage()); } public void testFieldCannotBeEmptyAndWeighted() { SimpleQueryStringBuilder qb = createTestQueryBuilder(); - try { - qb.field("", AbstractQueryBuilder.DEFAULT_BOOST); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("supplied field is null or empty.")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> qb.field("", AbstractQueryBuilder.DEFAULT_BOOST)); + assertEquals("supplied field is null or empty", e.getMessage()); } /** @@ -223,12 +207,8 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase qb.fields(null)); + assertEquals("fields cannot be null", e.getMessage()); } public void testDefaultFieldParsing() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java index 96b89fa521c..abaa35818c9 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java @@ -40,19 +40,9 @@ public class SpanContainingQueryBuilderTests extends AbstractQueryTestCase new SpanContainingQueryBuilder(null, spanTermQuery)); + expectThrows(IllegalArgumentException.class, () -> new SpanContainingQueryBuilder(spanTermQuery, null)); } public void testFromJson() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanFirstQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanFirstQueryBuilderTests.java index a0279a7942e..d74ae56fe0f 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanFirstQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanFirstQueryBuilderTests.java @@ -56,12 +56,8 @@ public class SpanFirstQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertTrue(e.getMessage().contains("spanFirst must have [end] set")); } { XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -71,12 +67,8 @@ public class SpanFirstQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertTrue(e.getMessage().contains("spanFirst must have [match] span query clause")); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index 81d7598df1d..8abaee66725 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -72,15 +73,14 @@ public class SpanMultiTermQueryBuilderTests extends 
AbstractQueryTestCase 0 && context.fieldMapper(DATE_FIELD_NAME) != null) { - try { - RangeQueryBuilder query = new RangeQueryBuilder(DATE_FIELD_NAME); - new SpanMultiTermQueryBuilder(query).toQuery(createShardContext()); - fail("Exception expected, range query on date fields should not generate a lucene " + MultiTermQuery.class.getName()); - } catch (UnsupportedOperationException e) { - assert(e.getMessage().contains("unsupported inner query, should be " + MultiTermQuery.class.getName())); - } - } + assumeTrue("test runs only if there is a registered type", + getCurrentTypes().length > 0 && context.fieldMapper(DATE_FIELD_NAME) != null); + + RangeQueryBuilder query = new RangeQueryBuilder(DATE_FIELD_NAME); + SpanMultiTermQueryBuilder spamMultiTermQuery = new SpanMultiTermQueryBuilder(query); + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, + () -> spamMultiTermQuery.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("unsupported inner query, should be " + MultiTermQuery.class.getName())); } public void testToQueryInnerSpanMultiTerm() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java index ccfda9596f3..b8a9dcd2afc 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java @@ -62,18 +62,9 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase new SpanNotQueryBuilder(null, spanTermQuery)); + expectThrows(IllegalArgumentException.class, () -> new SpanNotQueryBuilder(spanTermQuery, null)); } public void testDist() { @@ -136,12 +127,8 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertThat(e.getDetailedMessage(), containsString("spanNot must have [include]")); } { XContentBuilder 
builder = XContentFactory.jsonBuilder(); @@ -154,12 +141,8 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertThat(e.getDetailedMessage(), containsString("spanNot must have [exclude]")); } { XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -175,12 +158,8 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase parseQuery(builder.string())); + assertThat(e.getDetailedMessage(), containsString("spanNot can either use [dist] or [pre] & [post] (or none)")); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java index da76dd15371..38be4ea346c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java @@ -100,12 +100,9 @@ public class SpanTermQueryBuilderTests extends AbstractTermQueryTestCase parseQuery(json)); + assertEquals("[span_term] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanWithinQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanWithinQueryBuilderTests.java index a05a2a1af81..ef684412e54 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanWithinQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanWithinQueryBuilderTests.java @@ -40,19 +40,9 @@ public class SpanWithinQueryBuilderTests extends AbstractQueryTestCase new SpanWithinQueryBuilder(null, spanTermQuery)); + expectThrows(IllegalArgumentException.class, () -> new SpanWithinQueryBuilder(spanTermQuery, null)); } public void testFromJson() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java 
b/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java index fbb2e67b621..276768183b0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.query; +import com.fasterxml.jackson.core.io.JsonStringEncoder; import org.apache.lucene.index.Term; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; @@ -27,14 +28,11 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.MappedFieldType; -import com.fasterxml.jackson.core.io.JsonStringEncoder; - import java.io.IOException; -import static org.hamcrest.Matchers.either; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.either; public class TermQueryBuilderTests extends AbstractTermQueryTestCase { @@ -115,12 +113,8 @@ public class TermQueryBuilderTests extends AbstractTermQueryTestCase parseQuery(queryAsString)); + assertEquals("[term] query does not support array of values", e.getMessage()); } public void testFromJson() throws IOException { @@ -136,7 +130,6 @@ public class TermQueryBuilderTests extends AbstractTermQueryTestCase 0); TermQueryBuilder query = new TermQueryBuilder(GEO_POINT_FIELD_NAME, "2,3"); QueryShardContext context = createShardContext(); - QueryShardException e = expectThrows(QueryShardException.class, - () -> query.toQuery(context)); + QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); assertEquals("Geo fields do not support exact searching, use dedicated geo queries instead: [mapped_geo_point]", e.getMessage()); } @@ -161,12 +153,7 @@ public class TermQueryBuilderTests extends AbstractTermQueryTestCase parseQuery(json)); + 
assertEquals("[term] query does not support different field names, use [bool] query instead", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 985669394d9..74c55da15b5 100644 --- a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.AbstractQueryTestCase; -import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; @@ -49,7 +48,6 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; public class TermsQueryBuilderTests extends AbstractQueryTestCase { private List randomTerms; @@ -146,56 +144,32 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase new TermsQueryBuilder(null, "term")); + assertEquals("field name cannot be null.", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("", "term")); + assertEquals("field name cannot be null.", e.getMessage()); } public void testEmtpyTermsLookup() { - try { - new TermsQueryBuilder("field", (TermsLookup) null); - fail("Expected IllegalArgumentException"); - } catch(IllegalArgumentException e) { - assertThat(e.getMessage(), is("No value or termsLookup specified for terms query")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (TermsLookup) null)); + assertEquals("No value or termsLookup specified for terms query", e.getMessage()); } public void testNullValues() { 
- try { - switch (randomInt(6)) { - case 0: - new TermsQueryBuilder("field", (String[]) null); - break; - case 1: - new TermsQueryBuilder("field", (int[]) null); - break; - case 2: - new TermsQueryBuilder("field", (long[]) null); - break; - case 3: - new TermsQueryBuilder("field", (float[]) null); - break; - case 4: - new TermsQueryBuilder("field", (double[]) null); - break; - case 5: - new TermsQueryBuilder("field", (Object[]) null); - break; - default: - new TermsQueryBuilder("field", (Iterable) null); - break; - } - fail("should have failed with IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), Matchers.containsString("No value specified for terms query")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (String[]) null)); + assertThat(e.getMessage(), containsString("No value specified for terms query")); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (int[]) null)); + assertThat(e.getMessage(), containsString("No value specified for terms query")); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (long[]) null)); + assertThat(e.getMessage(), containsString("No value specified for terms query")); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (float[]) null)); + assertThat(e.getMessage(), containsString("No value specified for terms query")); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (double[]) null)); + assertThat(e.getMessage(), containsString("No value specified for terms query")); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (Object[]) null)); + assertThat(e.getMessage(), containsString("No value specified for terms query")); + e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (Iterable) null)); + 
assertThat(e.getMessage(), containsString("No value specified for terms query")); } public void testBothValuesAndLookupSet() throws IOException { @@ -213,12 +187,9 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase parseQuery(query)); + assertThat(e.getMessage(), containsString("[" + TermsQueryBuilder.NAME + "] query does not support more than one field.")); } @Override @@ -267,12 +238,8 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase parseQuery(query)); + assertEquals("[" + TermsQueryBuilder.NAME + "] query does not support multiple fields", e.getMessage()); } public void testFromJson() throws IOException { @@ -288,7 +255,7 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase parseQuery(deprecatedJson)); + assertEquals("Deprecated field [in] used, expected [terms] instead", e.getMessage()); } @Override public void testMustRewrite() throws IOException { TermsQueryBuilder termsQueryBuilder = new TermsQueryBuilder(STRING_FIELD_NAME, randomTermsLookup()); - try { - termsQueryBuilder.toQuery(createShardContext()); - fail(); - } catch (UnsupportedOperationException ex) { - assertEquals("query must be rewritten first", ex.getMessage()); - } + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, + () -> termsQueryBuilder.toQuery(createShardContext())); + assertEquals("query must be rewritten first", e.getMessage()); assertEquals(termsQueryBuilder.rewrite(createShardContext()), new TermsQueryBuilder(STRING_FIELD_NAME, randomTerms.stream().filter(x -> x != null).collect(Collectors.toList()))); // terms lookup removes null values } diff --git a/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java index a3265d06d70..5e02aba6ec6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java @@ -44,12 +44,7 @@ public class TypeQueryBuilderTests extends AbstractQueryTestCase new TypeQueryBuilder((String) null)); } public void testFromJson() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java index b987c3b9a3d..31eee307152 100644 --- a/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java @@ -75,40 +75,26 @@ public class WildcardQueryBuilderTests extends AbstractQueryTestCase new WildcardQueryBuilder(null, "text")); + assertEquals("field name is null or empty", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> new WildcardQueryBuilder("", "text")); + assertEquals("field name is null or empty", e.getMessage()); - try { - new WildcardQueryBuilder("field", null); - fail("cannot be null or empty"); - } catch (IllegalArgumentException e) { - assertEquals("value cannot be null", e.getMessage()); - } + e = expectThrows(IllegalArgumentException.class, () -> new WildcardQueryBuilder("field", null)); + assertEquals("value cannot be null", e.getMessage()); } public void testEmptyValue() throws IOException { QueryShardContext context = createShardContext(); context.setAllowUnmappedFields(true); - WildcardQueryBuilder wildcardQueryBuilder = new WildcardQueryBuilder(getRandomType(), ""); assertEquals(wildcardQueryBuilder.toQuery(context).getClass(), WildcardQuery.class); } public void testFromJson() throws IOException { - String json = - "{ \"wildcard\" : { \"user\" : { \"wildcard\" : \"ki*y\", \"boost\" : 2.0 } }}"; - + String json = "{ \"wildcard\" : { \"user\" : { \"wildcard\" : \"ki*y\", \"boost\" : 2.0 } }}"; WildcardQueryBuilder parsed = (WildcardQueryBuilder) parseQuery(json); checkGeneratedJson(json, parsed); 
- assertEquals(json, "ki*y", parsed.value()); assertEquals(json, 2.0, parsed.boost(), 0.0001); } @@ -125,12 +111,7 @@ public class WildcardQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertEquals("[wildcard] query doesn't support multiple fields, found [user1] and [user2]", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/WrapperQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/WrapperQueryBuilderTests.java index 492b27f4957..ee507365c56 100644 --- a/core/src/test/java/org/elasticsearch/index/query/WrapperQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/WrapperQueryBuilderTests.java @@ -61,38 +61,12 @@ public class WrapperQueryBuilderTests extends AbstractQueryTestCase new WrapperQueryBuilder((byte[]) null)); + expectThrows(IllegalArgumentException.class, () -> new WrapperQueryBuilder(new byte[0])); + expectThrows(IllegalArgumentException.class, () -> new WrapperQueryBuilder((String) null)); + expectThrows(IllegalArgumentException.class, () -> new WrapperQueryBuilder("")); + expectThrows(IllegalArgumentException.class, () -> new WrapperQueryBuilder((BytesReference) null)); + expectThrows(IllegalArgumentException.class, () -> new WrapperQueryBuilder(new BytesArray(new byte[0]))); } /** @@ -102,12 +76,9 @@ public class WrapperQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertTrue(e.getMessage().contains("bogusField")); } public void testFromJson() throws IOException { @@ -133,12 +104,8 @@ public class WrapperQueryBuilderTests extends AbstractQueryTestCase qb.toQuery(createShardContext())); + assertEquals("this query must be rewritten first", e.getMessage()); QueryBuilder rewrite = qb.rewrite(createShardContext()); assertEquals(tqb, rewrite); } diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java 
b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index bd841a05ca1..62f3cf4504e 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -414,12 +414,8 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase parseQuery(functionScoreQuery)); + assertThat(e.getMessage(), containsString("use [functions] array if you want to define several functions.")); } public void testProperErrorMessageWhenTwoFunctionsDefinedInFunctionsArray() throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index f59afcb40d8..64baa359447 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -281,13 +281,9 @@ public abstract class AbstractQueryTestCase> } while (testQuery.toString().contains(marker)); testQuery.queryName(marker); // to find root query to add additional bogus field there String queryAsString = testQuery.toString().replace("\"" + marker + "\"", "\"" + marker + "\", \"bogusField\" : \"someValue\""); - try { - parseQuery(queryAsString); - fail("ParsingException expected."); - } catch (ParsingException e) { - // we'd like to see the offending field name here - assertThat(e.getMessage(), containsString("bogusField")); - } + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(queryAsString)); + // we'd like to see the offending field name here + assertThat(e.getMessage(), containsString("bogusField")); } /** @@ -344,12 +340,8 @@ public abstract class AbstractQueryTestCase> validQuery.substring(insertionPosition, endArrayPosition) + "]" + 
validQuery.substring(endArrayPosition, validQuery.length()); - try { - parseQuery(testQuery); - fail("some parsing exception expected for query: " + testQuery); - } catch (ParsingException e) { - assertEquals("[" + queryName + "] query malformed, no start_object after query name", e.getMessage()); - } + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(testQuery)); + assertEquals("[" + queryName + "] query malformed, no start_object after query name", e.getMessage()); } /** From c32a4324b03d66508e125d34cc5bc1031ef25367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 5 Aug 2016 14:11:01 +0200 Subject: [PATCH 065/103] Add NamedWriteables from plugins to TransportClient Plugins provide NamedWriteables that are added to the NamedWriteableRegistry. Those are added on Nodes already, the same mechanism is added to the setup for TransportClient. --- .../org/elasticsearch/client/transport/TransportClient.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index e5538aa9917..aa11b389555 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -124,6 +124,9 @@ public abstract class TransportClient extends AbstractClient { List entries = new ArrayList<>(); entries.addAll(networkModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); + entries.addAll(pluginsService.filterPlugins(Plugin.class).stream() + .flatMap(p -> p.getNamedWriteables().stream()) + .collect(Collectors.toList())); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); ModulesBuilder modules = new ModulesBuilder(); From 284b9794c05a18f3d47e83d32630977f796d83fe Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 5 Aug 2016 14:51:47 
+0200 Subject: [PATCH 066/103] Do not parse the created version from the settings every time a field is parsed. #19824 I found it while looking at some jfr telemetry reports from Rally. --- .../main/java/org/elasticsearch/index/mapper/FieldMapper.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index d74d747d22b..f1126f35fb1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -238,6 +238,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { } } + private final Version indexCreatedVersion; protected MappedFieldType fieldType; protected final MappedFieldType defaultFieldType; protected MultiFields multiFields; @@ -246,6 +247,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName); assert indexSettings != null; + this.indexCreatedVersion = Version.indexCreated(indexSettings); fieldType.freeze(); this.fieldType = fieldType; defaultFieldType.freeze(); @@ -283,7 +285,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { if (!customBoost() // don't set boosts eg. 
on dv fields && field.fieldType().indexOptions() != IndexOptions.NONE - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { + && indexCreatedVersion.before(Version.V_5_0_0_alpha1)) { field.setBoost(fieldType().boost()); } context.doc().add(field); From f59ca9083b78c867bf750250dd358922f8aae58b Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Fri, 5 Aug 2016 09:39:02 -0400 Subject: [PATCH 067/103] Snapshot repository cleans up empty index folders (#19751) This commit cleans up indices in a snapshot repository when all snapshots containing the index are all deleted. Previously, empty indices folders would lay around after all snapshots containing them were deleted. --- .../common/blobstore/fs/FsBlobContainer.java | 16 ++++- .../elasticsearch/repositories/IndexId.java | 8 +++ .../blobstore/BlobStoreRepository.java | 25 +++++++- .../snapshots/FsBlobStoreRepositoryIT.java | 2 +- ...eCloudStorageBlobStoreRepositoryTests.java | 2 +- .../ESBlobStoreRepositoryIntegTestCase.java | 59 ++++++++++++++++++- 6 files changed, 107 insertions(+), 5 deletions(-) rename test/framework/src/main/java/org/elasticsearch/repositories/{ => blobstore}/ESBlobStoreRepositoryIntegTestCase.java (76%) diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index b46b555a8fe..757cce7d837 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -33,9 +33,11 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.file.DirectoryStream; import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileVisitResult; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; import java.nio.file.StandardCopyOption; import 
java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; @@ -89,7 +91,19 @@ public class FsBlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { Path blobPath = path.resolve(blobName); - Files.delete(blobPath); + if (Files.isDirectory(blobPath)) { + // delete directory recursively as long as it is empty (only contains empty directories), + // which is the reason we aren't deleting any files, only the directories on the post-visit + Files.walkFileTree(blobPath, new SimpleFileVisitor() { + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } else { + Files.delete(blobPath); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/repositories/IndexId.java b/core/src/main/java/org/elasticsearch/repositories/IndexId.java index 434582e61ed..e86d0939b2e 100644 --- a/core/src/main/java/org/elasticsearch/repositories/IndexId.java +++ b/core/src/main/java/org/elasticsearch/repositories/IndexId.java @@ -38,15 +38,19 @@ public final class IndexId implements Writeable, ToXContent { private final String name; private final String id; + private final int hashCode; public IndexId(final String name, final String id) { this.name = name; this.id = id; + this.hashCode = computeHashCode(); + } public IndexId(final StreamInput in) throws IOException { this.name = in.readString(); this.id = in.readString(); + this.hashCode = computeHashCode(); } /** @@ -90,6 +94,10 @@ public final class IndexId implements Writeable, ToXContent { @Override public int hashCode() { + return hashCode; + } + + private int computeHashCode() { return Objects.hash(name, id); } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 2bb92dd0c23..856b3ff4264 100644 
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -101,6 +101,7 @@ import java.io.FileNotFoundException; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.file.DirectoryNotEmptyException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Collection; @@ -406,7 +407,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } try { // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots - writeIndexGen(repositoryData.removeSnapshot(snapshotId)); + final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); + writeIndexGen(updatedRepositoryData); // delete the snapshot file safeSnapshotBlobDelete(snapshot, snapshotId.getUUID()); @@ -436,6 +438,27 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } } } + + // cleanup indices that are no longer part of the repository + final Collection indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values()); + indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values()); + final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices")); + for (final IndexId indexId : indicesToCleanUp) { + try { + indicesBlobContainer.deleteBlob(indexId.getId()); + } catch (DirectoryNotEmptyException dnee) { + // if the directory isn't empty for some reason, it will fail to clean up; + // we'll ignore that and accept that cleanup didn't fully succeed. 
+ // since we are using UUIDs for path names, this won't be an issue for + // snapshotting indices of the same name + logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + "its index folder due to the directory not being empty.", dnee, metadata.name(), indexId); + } catch (IOException ioe) { + // a different IOException occurred while trying to delete - will just log the issue for now + logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + "its index folder.", ioe, metadata.name(), indexId); + } + } } catch (IOException ex) { throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java b/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java index 84c3a03f2c8..792b1bdbddd 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java @@ -20,7 +20,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.repositories.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index f9548e7e2ea..095c0c2b1c7 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ 
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.junit.BeforeClass; import java.util.Collection; diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java similarity index 76% rename from test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreRepositoryIntegTestCase.java rename to test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 2ffd30fa470..d61155ecd60 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -16,13 +16,18 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.repositories; +package org.elasticsearch.repositories.blobstore; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.test.ESIntegTestCase; import java.util.Arrays; @@ -161,6 +166,58 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase } } + public void testIndicesDeletedFromRepository() throws Exception { + Client client = client(); + + logger.info("--> creating repository"); + final String repoName = "test-repo"; + createTestRepository(repoName); + + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 20; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + + logger.info("--> take a snapshot"); + CreateSnapshotResponse createSnapshotResponse = + client.admin().cluster().prepareCreateSnapshot(repoName, "test-snap").setWaitForCompletion(true).get(); + assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); + + logger.info("--> indexing more data"); + for (int i = 20; i < 40; i++) { + index("test-idx-1", "doc", 
Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); + } + + logger.info("--> take another snapshot with only 2 of the 3 indices"); + createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, "test-snap2") + .setWaitForCompletion(true) + .setIndices("test-idx-1", "test-idx-2") + .get(); + assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); + + logger.info("--> delete a snapshot"); + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, "test-snap").get()); + + logger.info("--> verify index folder deleted from blob container"); + RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + @SuppressWarnings("unchecked") BlobStoreRepository repository = (BlobStoreRepository) repositoriesSvc.repository(repoName); + BlobContainer indicesBlobContainer = repository.blobStore().blobContainer(repository.basePath().add("indices")); + RepositoryData repositoryData = repository.getRepositoryData(); + for (IndexId indexId : repositoryData.getIndices().values()) { + if (indexId.getName().equals("test-idx-3")) { + assertFalse(indicesBlobContainer.blobExists(indexId.getId())); // deleted index + } + } + } + protected void addRandomDocuments(String name, int numDocs) throws ExecutionException, InterruptedException { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { From e1629356560b9d68dff22633a8c357cc3e6d79e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 5 Aug 2016 16:05:52 +0200 Subject: [PATCH 068/103] Add test to check that plugin NamedWriteables are registerd with TransportClient --- .../client/transport/TransportClientIT.java | 52 +++++++++++++++++++ 1 file changed, 
52 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java index 761cc8cf0ae..59fbb94606d 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java @@ -22,11 +22,17 @@ package org.elasticsearch.client.transport; import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -34,6 +40,8 @@ import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -42,6 +50,7 @@ import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 1.0) public class TransportClientIT extends ESIntegTestCase { + public void testPickingUpChangesInDiscoveryNode() { String nodeName = 
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false)); @@ -97,4 +106,47 @@ public class TransportClientIT extends ESIntegTestCase { assertThat(Client.CLIENT_TYPE_SETTING_S.get(settings), is("transport")); } } + + /** + * test that when plugins are provided that want to register + * {@link NamedWriteable}, those are also made known to the + * {@link NamedWriteableRegistry} of the transport client + */ + public void testPluginNamedWriteablesRegistered() { + Settings baseSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .build(); + try (TransportClient client = new MockTransportClient(baseSettings, pluginList(MockPlugin.class))) { + NamedWriteableRegistry registry = client.injector.getInstance(NamedWriteableRegistry.class); + assertNotNull(registry.getReader(MockPlugin.MockNamedWriteable.class, MockPlugin.MockNamedWriteable.NAME)); + } + } + + public static class MockPlugin extends Plugin { + + @Override + public List getNamedWriteables() { + return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)}); + } + + public class MockNamedWriteable implements NamedWriteable { + + static final String NAME = "mockNamedWritable"; + + MockNamedWriteable(StreamInput in) { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public String getWriteableName() { + return NAME; + } + + } + } + + } From 8bebf2599ee0f1ab270e95a709477e8d59709bc1 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 5 Aug 2016 11:01:22 -0400 Subject: [PATCH 069/103] Add note explaining analysis caching for plugins ``` Elasticsearch doesn't have any automatic mechanism to share these components between indexes. If any component is heavy enough to warrant such sharing then it is the Pugin's responsibility to do it in their {@link AnalysisProvider} implementation. 
We recommend against doing this unless absolutely necessary because it can be difficult to get the caching right given things like behavior changes across versions. ``` Closes #19814 --- .../main/java/org/elasticsearch/plugins/AnalysisPlugin.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java b/core/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java index ffd278a5653..8c23e530e49 100644 --- a/core/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java @@ -45,6 +45,10 @@ import static java.util.Collections.emptyMap; * } * } * } + * + * Elasticsearch doesn't have any automatic mechanism to share these components between indexes. If any component is heavy enough to warrant + * such sharing then it is the Pugin's responsibility to do it in their {@link AnalysisProvider} implementation. We recommend against doing + * this unless absolutely necessary because it can be difficult to get the caching right given things like behavior changes across versions. 
*/ public interface AnalysisPlugin { /** From 899cddefb68333618f4d22de0fb09fc5c7623382 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Fri, 5 Aug 2016 17:13:26 +0200 Subject: [PATCH 070/103] make ctors protected (#19831) This is useful if we need an acknowledged instance in a test --- .../action/admin/indices/mapping/put/PutMappingResponse.java | 4 ++-- .../admin/indices/template/put/PutIndexTemplateResponse.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java index 8ddec5e259f..64b3c77f050 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java @@ -30,11 +30,11 @@ import java.io.IOException; */ public class PutMappingResponse extends AcknowledgedResponse { - PutMappingResponse() { + protected PutMappingResponse() { } - PutMappingResponse(boolean acknowledged) { + protected PutMappingResponse(boolean acknowledged) { super(acknowledged); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java index 5953642d80b..bf6e05a6c7b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java @@ -29,10 +29,10 @@ import java.io.IOException; */ public class PutIndexTemplateResponse extends AcknowledgedResponse { - PutIndexTemplateResponse() { + protected PutIndexTemplateResponse() { } - PutIndexTemplateResponse(boolean acknowledged) { + protected PutIndexTemplateResponse(boolean acknowledged) { 
super(acknowledged); } From 981478e4a98ca126d8fa17f865e025b9321512ce Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Fri, 5 Aug 2016 19:10:13 +0200 Subject: [PATCH 071/103] mute test --- .../elasticsearch/search/aggregations/bucket/HistogramIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 554df5304b7..f9334193eef 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -886,6 +886,7 @@ public class HistogramIT extends ESIntegTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/19833") public void testEmptyWithExtendedBounds() throws Exception { int lastDataBucketKey = (numValueBuckets - 1) * interval; From e57f76aa2da2aef2241744f93702682dc5d843cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 5 Aug 2016 18:55:46 +0200 Subject: [PATCH 072/103] Ensure PutMappingRequest.buildFromSimplifiedDef fails when input isn't pairs The method requires pairs of fieldnames and property arguments and will fail if the varargs input is an uneven number. We should check this and fail with an appropriate IllegalArgumentException instead. 
--- .../admin/indices/mapping/put/PutMappingRequest.java | 10 ++++++++++ .../indices/mapping/put/PutMappingRequestTests.java | 7 +++++++ 2 files changed, 17 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index c638a429b10..152bc516549 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -177,7 +177,17 @@ public class PutMappingRequest extends AcknowledgedRequest im return source(buildFromSimplifiedDef(type, source)); } + /** + * @param type the mapping type + * @param source consisting of field/properties pairs (e.g. "field1", + * "type=string,store=true"). If the number of arguments is not + * divisible by two an {@link IllegalArgumentException} is thrown + * @return the mappings definition + */ public static XContentBuilder buildFromSimplifiedDef(String type, Object... 
source) { + if (source.length % 2 != 0) { + throw new IllegalArgumentException("mapping source must be pairs of fieldnames and properties definition."); + } try { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 04892b82339..9c93e5c73d9 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -57,4 +57,11 @@ public class PutMappingRequestTests extends ESTestCase { "Validation Failed: 1: either concrete index or unresolved indices can be set," + " concrete index: [[foo/bar]] and indices: [myindex];"); } + + public void testBuildFromSimplifiedDef() { + // test that method rejects input where input varargs fieldname/properites are not paired correctly + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> PutMappingRequest.buildFromSimplifiedDef("type", "only_field")); + assertEquals("mapping source must be pairs of fieldnames and properties definition.", e.getMessage()); + } } From 10d64eb43a1a33a404b013f5a49812c0dee59d46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 5 Aug 2016 19:42:52 +0200 Subject: [PATCH 073/103] Remove unneeded 140 character line suppresions --- .../resources/checkstyle_suppressions.xml | 54 ------------------- 1 file changed, 54 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 86ec8544994..75b3ecd240b 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -17,16 +17,12 @@ - - - - @@ -49,11 +45,9 @@ - - @@ 
-185,7 +179,6 @@ - @@ -202,7 +195,6 @@ - @@ -244,7 +236,6 @@ - @@ -273,18 +264,12 @@ - - - - - - @@ -300,7 +285,6 @@ - @@ -319,7 +303,6 @@ - @@ -354,7 +337,6 @@ - @@ -376,7 +358,6 @@ - @@ -407,7 +388,6 @@ - @@ -494,7 +474,6 @@ - @@ -520,13 +499,10 @@ - - - @@ -551,7 +527,6 @@ - @@ -569,7 +544,6 @@ - @@ -600,7 +574,6 @@ - @@ -614,7 +587,6 @@ - @@ -695,8 +667,6 @@ - - @@ -730,9 +700,7 @@ - - @@ -784,9 +752,7 @@ - - @@ -794,10 +760,7 @@ - - - @@ -911,7 +874,6 @@ - @@ -950,7 +912,6 @@ - @@ -981,18 +942,15 @@ - - - @@ -1012,8 +970,6 @@ - - @@ -1060,26 +1016,21 @@ - - - - - @@ -1107,13 +1058,11 @@ - - @@ -1136,7 +1085,6 @@ - @@ -1166,7 +1114,6 @@ - @@ -1181,7 +1128,6 @@ - From a62740bbd2f5657cf1f2cf38b21d3f2c1d38d09e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 5 Aug 2016 14:58:33 -0400 Subject: [PATCH 074/103] Avoid early initializing Netty Today when we load the Netty plugins, we indirectly cause several Netty classes to initialize. This is because we attempt to load some classes by name, and loading these classes is done in a way that triggers a long chain of class initializers within Netty. We should not do this, this can lead to log messages before the logger is loader, and it leads to initialization in cases when the classes would never be needed (for example, Netty 3 class initialization is never needed if Netty 4 is used, and vice versa). This commit avoids this early initialization of these classes by removing the need for the early loading. 
Relates #19819 --- .../client/PreBuiltTransportClient.java | 20 ------------- .../ReindexFromRemoteWithAuthTests.java | 9 +++--- .../index/reindex/RetryTests.java | 16 +---------- .../elasticsearch/transport/Netty3Plugin.java | 27 +----------------- .../transport/netty3/Netty3Utils.java | 22 +++++++++++++++ .../elasticsearch/transport/Netty4Plugin.java | 28 ------------------- .../plugin-metadata/plugin-security.policy | 4 +-- .../Netty4TransportPublishAddressIT.java | 1 - .../smoketest/ESSmokeClientTestCase.java | 1 - .../elasticsearch/http/HttpSmokeTestCase.java | 20 ++----------- 10 files changed, 32 insertions(+), 116 deletions(-) diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index cc7e722d802..dbd04079d53 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -21,7 +21,6 @@ package org.elasticsearch.transport.client; import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; @@ -57,7 +56,6 @@ public class PreBuiltTransportClient extends TransportClient { Arrays.asList( Netty3Plugin.class, Netty4Plugin.class, - TransportPlugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class)); @@ -71,24 +69,6 @@ public class PreBuiltTransportClient extends TransportClient { super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS)); } - public static final class TransportPlugin extends Plugin { - - private static final Setting ASSERT_NETTY_BUGLEVEL = - 
Setting.boolSetting("netty.assert.buglevel", true, Setting.Property.NodeScope); - - @Override - public List> getSettings() { - return Collections.singletonList(ASSERT_NETTY_BUGLEVEL); - } - - @Override - public Settings additionalSettings() { - return Settings.builder().put("netty.assert.buglevel", true) - .build(); - } - - } - @Override public void close() { super.close(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index d305fc77331..5c7a90157e7 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -60,17 +60,16 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Arrays.asList(RetryTests.BogusPlugin.class, - Netty4Plugin.class, - ReindexFromRemoteWithAuthTests.TestPlugin.class, - ReindexPlugin.class); + return Arrays.asList( + Netty4Plugin.class, + ReindexFromRemoteWithAuthTests.TestPlugin.class, + ReindexPlugin.class); } @Override protected Settings nodeSettings() { Settings.Builder settings = Settings.builder().put(super.nodeSettings()); // Weird incantation required to test with netty - settings.put("netty.assert.buglevel", false); settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); // Whitelist reindexing from the http host we're going to use settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index ecebe141ce9..c0c1d681a09 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -93,20 +93,7 @@ public class RetryTests extends ESSingleNodeTestCase { return pluginList( ReindexPlugin.class, Netty3Plugin.class, - Netty4Plugin.class, - BogusPlugin.class); - } - - public static final class BogusPlugin extends Plugin { - // this runs without the permission from the netty module so it will fail since reindex can't set the property - // to make it still work we disable that check but need to register the setting first - private static final Setting ASSERT_NETTY_BUGLEVEL = Setting.boolSetting("netty.assert.buglevel", true, - Setting.Property.NodeScope); - - @Override - public List> getSettings() { - return Collections.singletonList(ASSERT_NETTY_BUGLEVEL); - } + Netty4Plugin.class); } /** @@ -115,7 +102,6 @@ public class RetryTests extends ESSingleNodeTestCase { @Override protected Settings nodeSettings() { Settings.Builder settings = Settings.builder().put(super.nodeSettings()); - settings.put("netty.assert.buglevel", false); // Use pools of size 1 so we can block them settings.put("thread_pool.bulk.size", 1); settings.put("thread_pool.search.size", 1); diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java index d29532f9d20..a1f9985c6a0 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/Netty3Plugin.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -36,31 +35,6 @@ public class Netty3Plugin extends Plugin { public static final String NETTY_TRANSPORT_NAME = "netty3"; public static final String 
NETTY_HTTP_TRANSPORT_NAME = "netty3"; - public Netty3Plugin(Settings settings) { - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(new SpecialPermission()); - } - AccessController.doPrivileged((PrivilegedAction) () -> { - try { - Class.forName("org.jboss.netty.channel.socket.nio.SelectorUtil"); - } catch (ClassNotFoundException e) { - throw new AssertionError(e); // we don't do anything with this - } - return null; - }); - /* - * Asserts that sun.nio.ch.bugLevel has been set to a non-null value. This assertion will fail if the corresponding code - * is not executed in a doPrivileged block. This can be disabled via `netty.assert.buglevel` setting which isn't registered - * by default but test can do so if they depend on the jar instead of the module. - */ - //TODO Once we have no jar level dependency we can get rid of this. - if (settings.getAsBoolean("netty.assert.buglevel", true)) { - assert System.getProperty("sun.nio.ch.bugLevel") != null : - "sun.nio.ch.bugLevel is null somebody pulls in SelectorUtil without doing stuff in a doPrivileged block?"; - } - } - @Override public List> getSettings() { return Arrays.asList( @@ -89,4 +63,5 @@ public class Netty3Plugin extends Plugin { } networkModule.registerTransport(NETTY_TRANSPORT_NAME, Netty3Transport.class); } + } diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java index 37fc483d4f4..17a367735d4 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport.netty3; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.SuppressForbidden; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.jboss.netty.buffer.ChannelBuffer; @@ -30,6 +31,8 @@ import org.jboss.netty.util.ThreadNameDeterminer; import org.jboss.netty.util.ThreadRenamingRunnable; import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.ArrayList; /** @@ -102,6 +105,25 @@ public class Netty3Utils { }); ThreadRenamingRunnable.setThreadNameDeterminer(ES_THREAD_NAME_DETERMINER); + + // Netty 3 SelectorUtil wants to set this; however, it does not execute the property write + // in a privileged block so we just do what Netty wants to do here + final String key = "sun.nio.ch.bugLevel"; + final String buglevel = System.getProperty(key); + if (buglevel == null) { + try { + AccessController.doPrivileged(new PrivilegedAction() { + @Override + @SuppressForbidden(reason = "to use System#setProperty to set sun.nio.ch.bugLevel") + public Void run() { + System.setProperty(key, ""); + return null; + } + }); + } catch (final SecurityException e) { + Loggers.getLogger(Netty3Utils.class).debug("Unable to get/set System Property: {}", e, key); + } + } } public static void setup() { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index eb91c47564e..8f449b95ecd 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -27,8 +26,6 @@ import org.elasticsearch.http.netty4.Netty4HttpServerTransport; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.netty4.Netty4Transport; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.Arrays; import java.util.List; @@ -37,31 +34,6 @@ public class Netty4Plugin extends Plugin { public static final String NETTY_TRANSPORT_NAME = "netty4"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; - public Netty4Plugin(Settings settings) { - SecurityManager sm = System.getSecurityManager(); - if (sm != null) { - sm.checkPermission(new SpecialPermission()); - } - AccessController.doPrivileged((PrivilegedAction) () -> { - try { - Class.forName("io.netty.channel.nio.NioEventLoop"); - } catch (ClassNotFoundException e) { - throw new AssertionError(e); // we don't do anything with this - } - return null; - }); - /* - * Asserts that sun.nio.ch.bugLevel has been set to a non-null value. This assertion will fail if the corresponding code - * is not executed in a doPrivileged block. This can be disabled via `netty.assert.buglevel` setting which isn't registered - * by default but test can do so if they depend on the jar instead of the module. - */ - //TODO Once we have no jar level dependency we can get rid of this. - if (settings.getAsBoolean("netty.assert.buglevel", true)) { - assert System.getProperty("sun.nio.ch.bugLevel") != null : - "sun.nio.ch.bugLevel is null somebody pulls in SelectorUtil without doing stuff in a doPrivileged block?"; - } - } - @Override public List> getSettings() { return Arrays.asList( diff --git a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy index 45c8cd923aa..a6fb99d1f62 100644 --- a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy +++ b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy @@ -17,8 +17,8 @@ * under the License. 
*/ -grant { - // Netty SelectorUtil wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 +grant codeBase "${codebase.netty-transport-4.1.4.Final.jar}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; }; \ No newline at end of file diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java index 0b8e4ed85b5..6a6f7bee80e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.Netty4Plugin; diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index 96c1139c790..645cc8382d6 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -50,7 +50,6 @@ import java.util.Locale; import java.util.concurrent.atomic.AtomicInteger; import static 
com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; import static org.hamcrest.Matchers.notNullValue; diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index 03a7ba68a71..98c9523275f 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -61,7 +61,6 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("netty.assert.buglevel", false) .put(NetworkModule.TRANSPORT_TYPE_KEY, nodeTransportTypeKey) .put(NetworkModule.HTTP_TYPE_KEY, nodeHttpTypeKey) .put(NetworkModule.HTTP_ENABLED.getKey(), true).build(); @@ -69,19 +68,18 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return pluginList(MockTcpTransportPlugin.class, Netty3Plugin.class, Netty4Plugin.class, BogusPlugin.class); + return pluginList(MockTcpTransportPlugin.class, Netty3Plugin.class, Netty4Plugin.class); } @Override protected Collection> transportClientPlugins() { - return pluginList(MockTcpTransportPlugin.class, Netty3Plugin.class, Netty4Plugin.class, BogusPlugin.class); + return pluginList(MockTcpTransportPlugin.class, Netty3Plugin.class, Netty4Plugin.class); } @Override protected Settings transportClientSettings() { return Settings.builder() .put(super.transportClientSettings()) - .put("netty.assert.buglevel", false) .put(NetworkModule.TRANSPORT_TYPE_KEY, clientTypeKey) .build(); } @@ -91,18 +89,4 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { return true; } - 
public static final class BogusPlugin extends Plugin { - - // this runs without the permission from the netty modules so it will fail since reindex can't set the property - // to make it still work we disable that check but need to register the setting first - private static final Setting ASSERT_NETTY_BUGLEVEL = - Setting.boolSetting("netty.assert.buglevel", true, Setting.Property.NodeScope); - - @Override - public List> getSettings() { - return Collections.singletonList(ASSERT_NETTY_BUGLEVEL); - } - - } - } From 64c7ba96d9456e70095048e38bac147b04144b43 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 5 Aug 2016 15:31:03 -0400 Subject: [PATCH 075/103] Add field for plugins installed on issue template This commit adds a field to the GitHub issue template for a list of the plugins that are installed on the Elasticsearch installation. This is a common enough ask that it is better to just collect this information up front. Relates #19840 --- .github/ISSUE_TEMPLATE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 53e964188f6..7822d8ef2b3 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -17,6 +17,8 @@ request block and provide responses for all of the below items. 
**Elasticsearch version**: +**Plugins installed**: [] + **JVM version**: **OS version**: From 6ccb70e1ab8963e7742ce972ad5303646f2b3323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 5 Aug 2016 18:11:50 +0200 Subject: [PATCH 076/103] Avoid using injector and more test to TransportClientTests --- .../client/transport/TransportClient.java | 9 +++- .../client/transport/TransportClientIT.java | 51 ------------------- .../transport/TransportClientTests.java | 50 ++++++++++++++++++ 3 files changed, 57 insertions(+), 53 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index aa11b389555..f7ce9f929bd 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -170,7 +170,7 @@ public abstract class TransportClient extends AbstractClient { transportService.start(); transportService.acceptIncomingRequests(); - ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy); + ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy, namedWriteableRegistry); resourcesToClose.clear(); return transportClient; } finally { @@ -183,12 +183,15 @@ public abstract class TransportClient extends AbstractClient { private final List pluginLifecycleComponents; private final TransportClientNodesService nodesService; private final TransportProxyClient proxy; + private final NamedWriteableRegistry namedWriteableRegistry; - private ClientTemplate(Injector injector, List pluginLifecycleComponents, TransportClientNodesService nodesService, TransportProxyClient proxy) { + private ClientTemplate(Injector injector, List pluginLifecycleComponents, + TransportClientNodesService nodesService, TransportProxyClient proxy, 
NamedWriteableRegistry namedWriteableRegistry) { this.injector = injector; this.pluginLifecycleComponents = pluginLifecycleComponents; this.nodesService = nodesService; this.proxy = proxy; + this.namedWriteableRegistry = namedWriteableRegistry; } Settings getSettings() { @@ -203,6 +206,7 @@ public abstract class TransportClient extends AbstractClient { public static final String CLIENT_TYPE = "transport"; final Injector injector; + final NamedWriteableRegistry namedWriteableRegistry; private final List pluginLifecycleComponents; private final TransportClientNodesService nodesService; @@ -231,6 +235,7 @@ public abstract class TransportClient extends AbstractClient { this.pluginLifecycleComponents = Collections.unmodifiableList(template.pluginLifecycleComponents); this.nodesService = template.nodesService; this.proxy = template.proxy; + this.namedWriteableRegistry = template.namedWriteableRegistry; } /** diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java index 59fbb94606d..9b5b764b88e 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java @@ -22,17 +22,11 @@ package org.elasticsearch.client.transport; import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; 
import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -40,8 +34,6 @@ import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Arrays; -import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -106,47 +98,4 @@ public class TransportClientIT extends ESIntegTestCase { assertThat(Client.CLIENT_TYPE_SETTING_S.get(settings), is("transport")); } } - - /** - * test that when plugins are provided that want to register - * {@link NamedWriteable}, those are also made known to the - * {@link NamedWriteableRegistry} of the transport client - */ - public void testPluginNamedWriteablesRegistered() { - Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, pluginList(MockPlugin.class))) { - NamedWriteableRegistry registry = client.injector.getInstance(NamedWriteableRegistry.class); - assertNotNull(registry.getReader(MockPlugin.MockNamedWriteable.class, MockPlugin.MockNamedWriteable.NAME)); - } - } - - public static class MockPlugin extends Plugin { - - @Override - public List getNamedWriteables() { - return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)}); - } - - public class MockNamedWriteable implements NamedWriteable { - - static final String NAME = "mockNamedWritable"; - - MockNamedWriteable(StreamInput in) { - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - } - - @Override - public String getWriteableName() { - return NAME; - } - - } - } - - } diff --git 
a/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java index 2145f66b5e0..c97418bae37 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java @@ -20,10 +20,20 @@ package org.elasticsearch.client.transport; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.MockTransportClient; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.containsString; @@ -38,4 +48,44 @@ public class TransportClientTests extends ESTestCase { expectThrows(IllegalStateException.class, () -> client.admin().cluster().health(new ClusterHealthRequest()).get()); assertThat(e, hasToString(containsString("transport client is closed"))); } + + /** + * test that when plugins are provided that want to register + * {@link NamedWriteable}, those are also made known to the + * {@link NamedWriteableRegistry} of the transport client + */ + public void testPluginNamedWriteablesRegistered() { + Settings baseSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .build(); + try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { + 
assertNotNull(client.namedWriteableRegistry.getReader(MockPlugin.MockNamedWriteable.class, MockPlugin.MockNamedWriteable.NAME)); + } + } + + public static class MockPlugin extends Plugin { + + @Override + public List getNamedWriteables() { + return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)}); + } + + public class MockNamedWriteable implements NamedWriteable { + + static final String NAME = "mockNamedWritable"; + + MockNamedWriteable(StreamInput in) { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public String getWriteableName() { + return NAME; + } + + } + } } From 243722680268ddbfc9b7a321d229c08d38773c4c Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 5 Aug 2016 20:47:32 +0200 Subject: [PATCH 077/103] [TEST] restore tests repeatability in AbstractQueryTestCase Some random operations were conditionally performed in the before test, which made tests not repeatable. For instance take the seed chain to repeat a specific iteration and try to reproduce it, this conditional code would get executed in both cases when trying to isolate the failure, but not among the different iterations (as only the first method/iteration executes it), hence the failure will not reproduce. Moved the random operations to beforeClass and left the non random part in the before method, which is needed as it depends on some method that can be overridden by subclasses. 
--- .../PercolateQueryBuilderTests.java | 3 +- .../test/AbstractQueryTestCase.java | 113 ++++++++++-------- 2 files changed, 61 insertions(+), 55 deletions(-) diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 09cb4a10aa1..6c8b345f543 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.Script; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; @@ -75,7 +74,7 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 64baa359447..d5c4752da5e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -43,6 +43,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -103,6 +104,7 @@ import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; 
+import org.junit.BeforeClass; import java.io.Closeable; import java.io.IOException; @@ -145,18 +147,18 @@ public abstract class AbstractQueryTestCase> private static ServiceHolder serviceHolder; private static int queryNameId = 0; + private static Settings nodeSettings; + private static Settings indexSettings; + private static Index index; + private static String[] currentTypes; private static String[] randomTypes; - protected Index getIndex() { - return serviceHolder.index; + protected static Index getIndex() { + return index; } - protected Version getIndexVersionCreated() { - return serviceHolder.indexVersionCreated; - } - - protected String[] getCurrentTypes() { - return serviceHolder.currentTypes; + protected static String[] getCurrentTypes() { + return currentTypes; } protected Collection> getPlugins() { @@ -166,6 +168,32 @@ public abstract class AbstractQueryTestCase> protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { } + @BeforeClass + public static void beforeClass() { + // we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually. + Version indexVersionCreated = randomBoolean() ? 
Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT); + nodeSettings = Settings.builder() + .put("node.name", AbstractQueryTestCase.class.toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false) + .build(); + indexSettings = Settings.builder() + .put(ParseFieldMatcher.PARSE_STRICT, true) + .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated).build(); + + index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); + + //create some random type with some default field, those types will stick around for all of the subclasses + currentTypes = new String[randomIntBetween(0, 5)]; + for (int i = 0; i < currentTypes.length; i++) { + String type = randomAsciiOfLengthBetween(1, 10); + currentTypes[i] = type; + } + //set some random types to be queried as part the search request, before each test + randomTypes = getRandomTypes(); + } + @AfterClass public static void afterClass() throws Exception { IOUtils.close(serviceHolder); @@ -175,12 +203,9 @@ public abstract class AbstractQueryTestCase> @Before public void beforeTest() throws IOException { if (serviceHolder == null) { - serviceHolder = new ServiceHolder(getPlugins(), this); + serviceHolder = new ServiceHolder(nodeSettings, indexSettings, getPlugins(), this); } - serviceHolder.clientInvocationHandler.delegate = this; - //set some random types to be queried as part the search request, before each test - randomTypes = getRandomTypes(); } private static void setSearchContext(String[] types, QueryShardContext context) { @@ -255,7 +280,7 @@ public abstract class AbstractQueryTestCase> * recursive random shuffling in the {@link #testFromXContent()} test case */ protected String[] shuffleProtectedFields() { - return new String[0]; + return Strings.EMPTY_ARRAY; } protected static XContentBuilder toXContent(QueryBuilder query, XContentType contentType) throws IOException { @@ 
-544,7 +569,7 @@ public abstract class AbstractQueryTestCase> /** * Serialize the given query builder and asserts that both are equal */ - protected QueryBuilder assertSerialization(QueryBuilder testQuery) throws IOException { + protected static QueryBuilder assertSerialization(QueryBuilder testQuery) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { output.writeNamedWriteable(testQuery); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), serviceHolder.namedWriteableRegistry)) { @@ -663,9 +688,9 @@ public abstract class AbstractQueryTestCase> /** * Helper method to return a mapped or a random field */ - protected String getRandomFieldName() { + protected static String getRandomFieldName() { // if no type is set then return a random field name - if (serviceHolder.currentTypes.length == 0 || randomBoolean()) { + if (currentTypes.length == 0 || randomBoolean()) { return randomAsciiOfLengthBetween(1, 10); } return randomFrom(MAPPED_LEAF_FIELD_NAMES); @@ -688,13 +713,13 @@ public abstract class AbstractQueryTestCase> return rewrite; } - private String[] getRandomTypes() { + private static String[] getRandomTypes() { String[] types; - if (serviceHolder.currentTypes.length > 0 && randomBoolean()) { - int numberOfQueryTypes = randomIntBetween(1, serviceHolder.currentTypes.length); + if (currentTypes.length > 0 && randomBoolean()) { + int numberOfQueryTypes = randomIntBetween(1, currentTypes.length); types = new String[numberOfQueryTypes]; for (int i = 0; i < numberOfQueryTypes; i++) { - types[i] = randomFrom(serviceHolder.currentTypes); + types[i] = randomFrom(currentTypes); } } else { if (randomBoolean()) { @@ -706,8 +731,8 @@ public abstract class AbstractQueryTestCase> return types; } - protected String getRandomType() { - return (serviceHolder.currentTypes.length == 0) ? 
MetaData.ALL : randomFrom(serviceHolder.currentTypes); + protected static String getRandomType() { + return (currentTypes.length == 0) ? MetaData.ALL : randomFrom(currentTypes); } protected static Fuzziness randomFuzziness(String fieldName) { @@ -848,9 +873,6 @@ public abstract class AbstractQueryTestCase> private final IndicesQueriesRegistry indicesQueriesRegistry; private final IndexFieldDataService indexFieldDataService; private final SearchModule searchModule; - private final Index index; - private final Version indexVersionCreated; - private final String[] currentTypes; private final NamedWriteableRegistry namedWriteableRegistry; private final ClientInvocationHandler clientInvocationHandler = new ClientInvocationHandler(); private final IndexSettings idxSettings; @@ -859,26 +881,15 @@ public abstract class AbstractQueryTestCase> private final BitsetFilterCache bitsetFilterCache; private final ScriptService scriptService; - ServiceHolder(Collection> plugins, AbstractQueryTestCase testCase) throws IOException { - // we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually. - indexVersionCreated = randomBoolean() ? 
Version.CURRENT - : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT); - Settings settings = Settings.builder() - .put("node.name", AbstractQueryTestCase.class.toString()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false) - .build(); - Settings indexSettings = Settings.builder() - .put(ParseFieldMatcher.PARSE_STRICT, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated).build(); - final ThreadPool threadPool = new ThreadPool(settings); - index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); + ServiceHolder(Settings nodeSettings, Settings indexSettings, + Collection> plugins, AbstractQueryTestCase testCase) throws IOException { + final ThreadPool threadPool = new ThreadPool(nodeSettings); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); ClusterServiceUtils.setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData( new MetaData.Builder().put(new IndexMetaData.Builder( index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); - Environment env = InternalSettingsPreparer.prepareEnvironment(settings, null); - PluginsService pluginsService =new PluginsService(settings, env.modulesFile(), env.pluginsFile(), plugins); + Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, null); + PluginsService pluginsService = new PluginsService(nodeSettings, env.modulesFile(), env.pluginsFile(), plugins); final Client proxy = (Client) Proxy.newProxyInstance( Client.class.getClassLoader(), @@ -888,8 +899,8 @@ public abstract class AbstractQueryTestCase> List> scriptSettings = scriptModule.getSettings(); scriptSettings.addAll(pluginsService.getPluginSettings()); scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED); - SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, 
pluginsService.getPluginSettingsFilter()); - searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class)) { + SettingsModule settingsModule = new SettingsModule(nodeSettings, scriptSettings, pluginsService.getPluginSettingsFilter()); + searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)) { @Override protected void configureSearch() { // Skip me @@ -913,7 +924,7 @@ public abstract class AbstractQueryTestCase> modulesBuilder.add( b -> { b.bind(PluginsService.class).toInstance(pluginsService); - b.bind(Environment.class).toInstance(new Environment(settings)); + b.bind(Environment.class).toInstance(new Environment(nodeSettings)); b.bind(ThreadPool.class).toInstance(threadPool); b.bind(Client.class).toInstance(proxy); b.bind(ClusterService.class).toProvider(Providers.of(clusterService)); @@ -926,14 +937,13 @@ public abstract class AbstractQueryTestCase> injector = modulesBuilder.createInjector(); IndexScopedSettings indexScopedSettings = injector.getInstance(IndexScopedSettings.class); idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings), emptyList()); + AnalysisModule analysisModule = new AnalysisModule(new Environment(nodeSettings), emptyList()); AnalysisService analysisService = analysisModule.getAnalysisRegistry().build(idxSettings); scriptService = scriptModule.getScriptService(); similarityService = new SimilarityService(idxSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class); - mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, - this::createShardContext); - IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { + mapperService = new MapperService(idxSettings, analysisService, 
similarityService, mapperRegistry, this::createShardContext); + IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() { }); indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache, injector.getInstance(CircuitBreakerService.class), mapperService); @@ -949,10 +959,8 @@ public abstract class AbstractQueryTestCase> } }); indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class); - //create some random type with some default field, those types will stick around for all of the subclasses - currentTypes = new String[randomIntBetween(0, 5)]; - for (int i = 0; i < currentTypes.length; i++) { - String type = randomAsciiOfLengthBetween(1, 10); + + for (String type : currentTypes) { mapperService.merge(type, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(type, STRING_FIELD_NAME, "type=text", STRING_FIELD_NAME_2, "type=keyword", @@ -969,7 +977,6 @@ public abstract class AbstractQueryTestCase> + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), MapperService.MergeReason.MAPPING_UPDATE, false); - currentTypes[i] = type; } testCase.initializeAdditionalMappings(mapperService); this.namedWriteableRegistry = injector.getInstance(NamedWriteableRegistry.class); From fee013c07c50aaa06415ec4ce4c77f398add0c1e Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 5 Aug 2016 17:49:56 -0400 Subject: [PATCH 078/103] Add support for returning documents with completion suggester This commit enables completion suggester to return documents associated with suggestions. Now the document source is returned with every suggestion, which respects source filtering options. 
When a suggest query spans more than one shard, the suggest is executed in two phases, where the last phase fetches the relevant documents from the shards. This means that suggest requests executed against a single shard are more performant, because they avoid the document-fetch overhead incurred when the suggest spans multiple shards. --- .../search/AbstractSearchAsyncAction.java | 18 +- .../SearchDfsQueryAndFetchAsyncAction.java | 4 +- .../SearchDfsQueryThenFetchAsyncAction.java | 19 +- .../SearchQueryAndFetchAsyncAction.java | 11 +- .../SearchQueryThenFetchAsyncAction.java | 19 +- .../SearchScrollQueryAndFetchAsyncAction.java | 4 +- ...SearchScrollQueryThenFetchAsyncAction.java | 11 +- .../elasticsearch/search/SearchService.java | 59 ++-- .../controller/SearchPhaseController.java | 256 ++++++++++++------ .../search/fetch/ShardFetchSearchRequest.java | 3 - .../MatchedQueriesFetchSubPhase.java | 4 +- .../search/internal/InternalSearchHit.java | 15 +- .../elasticsearch/search/suggest/Suggest.java | 62 +++-- .../completion/CompletionSuggester.java | 2 +- .../completion/CompletionSuggestion.java | 131 +++++++-- .../SearchPhaseControllerTests.java | 234 ++++++++++++++++ .../suggest/CompletionSuggestSearchIT.java | 113 ++++++++ .../search/suggest/SuggestTests.java | 73 +++++ .../completion/CompletionSuggestionTests.java | 61 +++++ .../suggesters/completion-suggest.asciidoc | 25 +- 20 files changed, 918 insertions(+), 206 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java create mode 100644 core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java create mode 100644 core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionTests.java diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 642748bd031..f9103f0cddc 100644 ---
a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -46,6 +46,7 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.threadpool.ThreadPool; import java.util.List; @@ -74,7 +75,7 @@ abstract class AbstractSearchAsyncAction protected final AtomicArray firstResults; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); - protected volatile ScoreDoc[] sortedShardList; + protected volatile ScoreDoc[] sortedShardDocs; protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, @@ -321,8 +322,11 @@ abstract class AbstractSearchAsyncAction // we only release search context that we did not fetch from if we are not scrolling if (request.scroll() == null) { for (AtomicArray.Entry entry : queryResults.asList()) { - final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs(); - if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches + QuerySearchResult queryResult = entry.value.queryResult().queryResult(); + final TopDocs topDocs = queryResult.topDocs(); + final Suggest suggest = queryResult.suggest(); + if (((topDocs != null && topDocs.scoreDocs.length > 0) // the shard had matches + ||suggest != null && suggest.hasScoreDocs()) // or had suggest docs && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs try { DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId()); @@ -343,12 +347,8 @@ abstract class 
AbstractSearchAsyncAction protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry entry, ScoreDoc[] lastEmittedDocPerShard) { - if (lastEmittedDocPerShard != null) { - ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index]; - return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc); - } else { - return new ShardFetchSearchRequest(request, queryResult.id(), entry.value); - } + final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[entry.index] : null; + return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc); } protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index e19540e26d5..8614d7b1188 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -118,8 +118,8 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { @Override public void doRun() throws IOException { - sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, + sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryFetchResults, queryFetchResults); String scrollId = null; if (request.scroll() != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java 
b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index cf3f9716710..9d8305cf6b1 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -135,18 +135,17 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); @@ -196,12 +195,10 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { @Override public void doRun() throws IOException { - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, + final boolean isScrollRequest = request.scroll() != null; + final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, queryResults, fetchResults); - String scrollId = null; - if (request.scroll() != null) { - scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults); - } + String scrollId = isScrollRequest ? 
TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null; listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures())); releaseIrrelevantSearchContexts(queryResults, docIdsToLoad); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index 5d55dd468a5..fad4d60275d 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -60,14 +60,11 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { @Override public void doRun() throws IOException { - boolean useScroll = request.scroll() != null; - sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, + final boolean isScrollRequest = request.scroll() != null; + sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, firstResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, firstResults, firstResults); - String scrollId = null; - if (request.scroll() != null) { - scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults); - } + String scrollId = isScrollRequest ? 
TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null; listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures())); } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index a6f9aa26f59..5f90d291dd2 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -68,18 +68,17 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResultProvider queryResult = firstResults.get(entry.index); @@ -129,12 +128,10 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { @Override public void doRun() throws IOException { - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, + final boolean isScrollRequest = request.scroll() != null; + final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, firstResults, fetchResults); - String scrollId = null; - if (request.scroll() != null) { - scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults); - } + String scrollId = isScrollRequest ? 
TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null; listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures())); releaseIrrelevantSearchContexts(firstResults, docIdsToLoad); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index 94ce1887c34..72154f224d2 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -168,8 +168,8 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { } private void innerFinishHim() throws Exception { - ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, + ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryFetchResults, queryFetchResults); String scrollId = null; if (request.scroll() != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index ac8715eeb9f..d9f649a7a55 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -53,7 +53,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private volatile AtomicArray shardFailures; final AtomicArray queryResults; final AtomicArray 
fetchResults; - private volatile ScoreDoc[] sortedShardList; + private volatile ScoreDoc[] sortedShardDocs; private final AtomicInteger successfulOps; SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService, @@ -165,9 +165,9 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { } private void executeFetchPhase() throws Exception { - sortedShardList = searchPhaseController.sortDocs(true, queryResults); + sortedShardDocs = searchPhaseController.sortDocs(true, queryResults); AtomicArray docIdsToLoad = new AtomicArray<>(queryResults.length()); - searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList); + searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs); if (docIdsToLoad.asList().isEmpty()) { finishHim(); @@ -175,7 +175,8 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { } - final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length()); + final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(), + sortedShardDocs, queryResults.length()); final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size()); for (final AtomicArray.Entry entry : docIdsToLoad.asList()) { IntArrayList docIds = entry.value; @@ -216,7 +217,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { } private void innerFinishHim() { - InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryResults, fetchResults); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index bfcfcb9d4c8..4d618eb057a 100644 --- 
a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -21,6 +21,7 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.ObjectFloatHashMap; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -87,6 +88,8 @@ import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Cancellable; import org.elasticsearch.threadpool.ThreadPool.Names; @@ -94,6 +97,7 @@ import org.elasticsearch.threadpool.ThreadPool.Names; import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -265,7 +269,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv loadOrExecuteQueryPhase(request, context); - if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) { + if (hasHits(context.queryResult()) == false && context.scrollContext() == null) { freeContext(context.id()); } else { contextProcessedSuccessfully(context); @@ -320,7 +324,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv operationListener.onPreQueryPhase(context); long time = System.nanoTime(); queryPhase.execute(context); - if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) { + if (hasHits(context.queryResult()) 
== false && context.scrollContext() == null) { // no hits, we can release the context since there will be no fetch phase freeContext(context.id()); } else { @@ -811,40 +815,55 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - private static final int[] EMPTY_DOC_IDS = new int[0]; - /** * Shortcut ids to load, we load only "from" and up to "size". The phase controller * handles this as well since the result is always size * shards for Q_A_F */ private void shortcutDocIdsToLoad(SearchContext context) { + final int[] docIdsToLoad; + int docsOffset = 0; + final Suggest suggest = context.queryResult().suggest(); + int numSuggestDocs = 0; + final List completionSuggestions; + if (suggest != null && suggest.hasScoreDocs()) { + completionSuggestions = suggest.filter(CompletionSuggestion.class); + for (CompletionSuggestion completionSuggestion : completionSuggestions) { + numSuggestDocs += completionSuggestion.getOptions().size(); + } + } else { + completionSuggestions = Collections.emptyList(); + } if (context.request().scroll() != null) { TopDocs topDocs = context.queryResult().topDocs(); - int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; + docIdsToLoad = new int[topDocs.scoreDocs.length + numSuggestDocs]; for (int i = 0; i < topDocs.scoreDocs.length; i++) { - docIdsToLoad[i] = topDocs.scoreDocs[i].doc; + docIdsToLoad[docsOffset++] = topDocs.scoreDocs[i].doc; } - context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); } else { TopDocs topDocs = context.queryResult().topDocs(); if (topDocs.scoreDocs.length < context.from()) { // no more docs... 
- context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0); - return; - } - int totalSize = context.from() + context.size(); - int[] docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size())]; - int counter = 0; - for (int i = context.from(); i < totalSize; i++) { - if (i < topDocs.scoreDocs.length) { - docIdsToLoad[counter] = topDocs.scoreDocs[i].doc; - } else { - break; + docIdsToLoad = new int[numSuggestDocs]; + } else { + int totalSize = context.from() + context.size(); + docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size()) + + numSuggestDocs]; + for (int i = context.from(); i < Math.min(totalSize, topDocs.scoreDocs.length); i++) { + docIdsToLoad[docsOffset++] = topDocs.scoreDocs[i].doc; } - counter++; } - context.docIdsToLoad(docIdsToLoad, 0, counter); } + for (CompletionSuggestion completionSuggestion : completionSuggestions) { + for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) { + docIdsToLoad[docsOffset++] = option.getDoc().doc; + } + } + context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); + } + + private static boolean hasHits(final QuerySearchResult searchResult) { + return searchResult.topDocs().scoreDocs.length > 0 || + (searchResult.suggest() != null && searchResult.suggest().hasScoreDocs()); } private void processScroll(InternalScrollSearchRequest request, SearchContext context) { diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index b2ce044e4fc..97f3b191aa9 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import 
org.apache.lucene.search.TopFieldDocs; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; @@ -53,18 +52,22 @@ import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; -import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.suggest.Suggest.Suggestion; +import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry; +import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.TreeMap; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -154,6 +157,10 @@ public class SearchPhaseController extends AbstractComponent { } /** + * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each + * named completion suggestion across all shards. If more than one named completion suggestion is specified in the + * request, the suggest docs for a named suggestion are ordered by the suggestion name. + * * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result. * Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase. 
* @param resultsArr Shard result holder @@ -191,19 +198,40 @@ public class SearchPhaseController extends AbstractComponent { offset = 0; } ScoreDoc[] scoreDocs = result.topDocs().scoreDocs; + ScoreDoc[] docs; + int numSuggestDocs = 0; + final Suggest suggest = result.queryResult().suggest(); + final List completionSuggestions; + if (suggest != null) { + completionSuggestions = suggest.filter(CompletionSuggestion.class); + for (CompletionSuggestion suggestion : completionSuggestions) { + numSuggestDocs += suggestion.getOptions().size(); + } + } else { + completionSuggestions = Collections.emptyList(); + } + int docsOffset = 0; if (scoreDocs.length == 0 || scoreDocs.length < offset) { - return EMPTY_DOCS; + docs = new ScoreDoc[numSuggestDocs]; + } else { + int resultDocsSize = result.size(); + if ((scoreDocs.length - offset) < resultDocsSize) { + resultDocsSize = scoreDocs.length - offset; + } + docs = new ScoreDoc[resultDocsSize + numSuggestDocs]; + for (int i = 0; i < resultDocsSize; i++) { + ScoreDoc scoreDoc = scoreDocs[offset + i]; + scoreDoc.shardIndex = shardIndex; + docs[i] = scoreDoc; + docsOffset++; + } } - - int resultDocsSize = result.size(); - if ((scoreDocs.length - offset) < resultDocsSize) { - resultDocsSize = scoreDocs.length - offset; - } - ScoreDoc[] docs = new ScoreDoc[resultDocsSize]; - for (int i = 0; i < resultDocsSize; i++) { - ScoreDoc scoreDoc = scoreDocs[offset + i]; - scoreDoc.shardIndex = shardIndex; - docs[i] = scoreDoc; + for (CompletionSuggestion suggestion: completionSuggestions) { + for (CompletionSuggestion.Entry.Option option : suggestion.getOptions()) { + ScoreDoc doc = option.getDoc(); + doc.shardIndex = shardIndex; + docs[docsOffset++] = doc; + } } return docs; } @@ -213,13 +241,7 @@ public class SearchPhaseController extends AbstractComponent { Arrays.sort(sortedResults, QUERY_RESULT_ORDERING); QuerySearchResultProvider firstResult = sortedResults[0].value; - int topN = firstResult.queryResult().size(); - if 
(firstResult.includeFetch()) { - // if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them... - // this is also important since we shortcut and fetch only docs from "from" and up to "size" - topN *= sortedResults.length; - } - + int topN = topN(results); int from = firstResult.queryResult().from(); if (ignoreFrom) { from = 0; @@ -258,40 +280,86 @@ public class SearchPhaseController extends AbstractComponent { } mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs); } - return mergedTopDocs.scoreDocs; - } - public ScoreDoc[] getLastEmittedDocPerShard(SearchRequest request, ScoreDoc[] sortedShardList, int numShards) { - if (request.scroll() != null) { - return getLastEmittedDocPerShard(sortedShardList, numShards); - } else { - return null; + ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs; + final Map>> groupedCompletionSuggestions = new HashMap<>(); + // group suggestions and assign shard index + for (AtomicArray.Entry sortedResult : sortedResults) { + Suggest shardSuggest = sortedResult.value.queryResult().suggest(); + if (shardSuggest != null) { + for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) { + suggestion.setShardIndex(sortedResult.index); + List> suggestions = + groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); + suggestions.add(suggestion); + } + } } + if (groupedCompletionSuggestions.isEmpty() == false) { + int numSuggestDocs = 0; + List>> completionSuggestions = + new ArrayList<>(groupedCompletionSuggestions.size()); + for (List> groupedSuggestions : groupedCompletionSuggestions.values()) { + final CompletionSuggestion completionSuggestion = CompletionSuggestion.reduceTo(groupedSuggestions); + assert completionSuggestion != null; + numSuggestDocs += completionSuggestion.getOptions().size(); + completionSuggestions.add(completionSuggestion); + } + scoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length + numSuggestDocs]; 
+ System.arraycopy(mergedTopDocs.scoreDocs, 0, scoreDocs, 0, mergedTopDocs.scoreDocs.length); + int offset = mergedTopDocs.scoreDocs.length; + Suggest suggestions = new Suggest(completionSuggestions); + for (CompletionSuggestion completionSuggestion : suggestions.filter(CompletionSuggestion.class)) { + for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) { + scoreDocs[offset++] = option.getDoc(); + } + } + } + return scoreDocs; } - public ScoreDoc[] getLastEmittedDocPerShard(ScoreDoc[] sortedShardList, int numShards) { + public ScoreDoc[] getLastEmittedDocPerShard(List> queryResults, + ScoreDoc[] sortedScoreDocs, int numShards) { ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards]; - for (ScoreDoc scoreDoc : sortedShardList) { - lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc; + if (queryResults.isEmpty() == false) { + long fetchHits = 0; + for (AtomicArray.Entry queryResult : queryResults) { + fetchHits += queryResult.value.queryResult().topDocs().scoreDocs.length; + } + // from is always zero as when we use scroll, we ignore from + long size = Math.min(fetchHits, topN(queryResults)); + for (int sortedDocsIndex = 0; sortedDocsIndex < size; sortedDocsIndex++) { + ScoreDoc scoreDoc = sortedScoreDocs[sortedDocsIndex]; + lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc; + } } return lastEmittedDocPerShard; + } /** * Builds an array, with potential null elements, with docs to load. 
*/ - public void fillDocIdsToLoad(AtomicArray docsIdsToLoad, ScoreDoc[] shardDocs) { + public void fillDocIdsToLoad(AtomicArray docIdsToLoad, ScoreDoc[] shardDocs) { for (ScoreDoc shardDoc : shardDocs) { - IntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex); - if (list == null) { - list = new IntArrayList(); // can't be shared!, uses unsafe on it later on - docsIdsToLoad.set(shardDoc.shardIndex, list); + IntArrayList shardDocIdsToLoad = docIdsToLoad.get(shardDoc.shardIndex); + if (shardDocIdsToLoad == null) { + shardDocIdsToLoad = new IntArrayList(); // can't be shared!, uses unsafe on it later on + docIdsToLoad.set(shardDoc.shardIndex, shardDocIdsToLoad); } - list.add(shardDoc.doc); + shardDocIdsToLoad.add(shardDoc.doc); } } - public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray queryResultsArr, + /** + * Enriches search hits and completion suggestion hits from sortedDocs using fetchResultsArr, + * merges suggestions, aggregations and profile results + * + * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named + * completion suggestion ordered by suggestion name + */ + public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs, + AtomicArray queryResultsArr, AtomicArray fetchResultsArr) { List> queryResults = queryResultsArr.asList(); @@ -317,6 +385,7 @@ public class SearchPhaseController extends AbstractComponent { // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) long totalHits = 0; + long fetchHits = 0; float maxScore = Float.NEGATIVE_INFINITY; boolean timedOut = false; Boolean terminatedEarly = null; @@ -333,6 +402,7 @@ public class SearchPhaseController extends AbstractComponent { } } totalHits += result.topDocs().totalHits; + fetchHits += result.topDocs().scoreDocs.length; if (!Float.isNaN(result.topDocs().getMaxScore())) { maxScore = Math.max(maxScore, result.topDocs().getMaxScore()); } 
@@ -345,11 +415,13 @@ public class SearchPhaseController extends AbstractComponent { for (AtomicArray.Entry entry : fetchResults) { entry.value.fetchResult().initCounter(); } - + int from = ignoreFrom ? 0 : firstResult.queryResult().from(); + int numSearchHits = (int) Math.min(fetchHits - from, topN(queryResults)); // merge hits List hits = new ArrayList<>(); if (!fetchResults.isEmpty()) { - for (ScoreDoc shardDoc : sortedDocs) { + for (int i = 0; i < numSearchHits; i++) { + ScoreDoc shardDoc = sortedDocs[i]; FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex); if (fetchResultProvider == null) { continue; @@ -360,7 +432,6 @@ public class SearchPhaseController extends AbstractComponent { InternalSearchHit searchHit = fetchResult.hits().internalHits()[index]; searchHit.score(shardDoc.score); searchHit.shard(fetchResult.shardTarget()); - if (sorted) { FieldDoc fieldDoc = (FieldDoc) shardDoc; searchHit.sortValues(fieldDoc.fields, firstResult.sortValueFormats()); @@ -368,7 +439,6 @@ public class SearchPhaseController extends AbstractComponent { searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue()); } } - hits.add(searchHit); } } @@ -376,38 +446,72 @@ public class SearchPhaseController extends AbstractComponent { // merge suggest results Suggest suggest = null; - if (!queryResults.isEmpty()) { - final Map> groupedSuggestions = new HashMap<>(); - boolean hasSuggestions = false; - for (AtomicArray.Entry entry : queryResults) { - Suggest shardResult = entry.value.queryResult().queryResult().suggest(); - - if (shardResult == null) { - continue; + if (firstResult.suggest() != null) { + final Map> groupedSuggestions = new HashMap<>(); + for (AtomicArray.Entry queryResult : queryResults) { + Suggest shardSuggest = queryResult.value.queryResult().suggest(); + if (shardSuggest != null) { + for (Suggestion> suggestion : shardSuggest) { + List suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new 
ArrayList<>()); + suggestionList.add(suggestion); + } + } + } + if (groupedSuggestions.isEmpty() == false) { + suggest = new Suggest(Suggest.reduce(groupedSuggestions)); + if (!fetchResults.isEmpty()) { + int currentOffset = numSearchHits; + for (CompletionSuggestion suggestion : suggest.filter(CompletionSuggestion.class)) { + final List suggestionOptions = suggestion.getOptions(); + for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { + ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; + FetchSearchResultProvider fetchSearchResultProvider = fetchResultsArr.get(shardDoc.shardIndex); + if (fetchSearchResultProvider == null) { + continue; + } + FetchSearchResult fetchResult = fetchSearchResultProvider.fetchResult(); + int fetchResultIndex = fetchResult.counterGetAndIncrement(); + if (fetchResultIndex < fetchResult.hits().internalHits().length) { + InternalSearchHit hit = fetchResult.hits().internalHits()[fetchResultIndex]; + CompletionSuggestion.Entry.Option suggestOption = + suggestionOptions.get(scoreDocIndex - currentOffset); + hit.score(shardDoc.score); + hit.shard(fetchResult.shardTarget()); + suggestOption.setHit(hit); + } + } + currentOffset += suggestionOptions.size(); + } + assert currentOffset == sortedDocs.length : "expected no more score doc slices"; } - hasSuggestions = true; - Suggest.group(groupedSuggestions, shardResult); } - - suggest = hasSuggestions ? 
new Suggest(Suggest.reduce(groupedSuggestions)) : null; } - // merge addAggregation + // merge Aggregation InternalAggregations aggregations = null; - if (!queryResults.isEmpty()) { - if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) { - List aggregationsList = new ArrayList<>(queryResults.size()); - for (AtomicArray.Entry entry : queryResults) { - aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations()); + if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) { + List aggregationsList = new ArrayList<>(queryResults.size()); + for (AtomicArray.Entry entry : queryResults) { + aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations()); + } + ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state()); + aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); + List pipelineAggregators = firstResult.pipelineAggregators(); + if (pipelineAggregators != null) { + List newAggs = StreamSupport.stream(aggregations.spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); + for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { + InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext); + newAggs.add(newAgg); } - ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state()); - aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); + aggregations = new InternalAggregations(newAggs); } } //Collect profile results SearchProfileShardResults shardResults = null; - if (!queryResults.isEmpty() && firstResult.profileResults() != null) { + if (firstResult.profileResults() != null) { Map profileResults = new HashMap<>(queryResults.size()); for (AtomicArray.Entry entry : queryResults) { String key = 
entry.value.queryResult().shardTarget().toString(); @@ -416,24 +520,22 @@ public class SearchPhaseController extends AbstractComponent { shardResults = new SearchProfileShardResults(profileResults); } - if (aggregations != null) { - List pipelineAggregators = firstResult.pipelineAggregators(); - if (pipelineAggregators != null) { - List newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state()); - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext); - newAggs.add(newAgg); - } - aggregations = new InternalAggregations(newAggs); - } - } - InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore); return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly); } + /** + * returns the number of top results to be considered across all shards + */ + private static int topN(List> queryResults) { + QuerySearchResultProvider firstResult = queryResults.get(0).value; + int topN = firstResult.queryResult().size(); + if (firstResult.includeFetch()) { + // if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them... 
+ // this is also important since we shortcut and fetch only docs from "from" and up to "size" + topN *= queryResults.size(); + } + return topN; + } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java b/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java index d908aca0fc8..f6738f99725 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java @@ -39,10 +39,7 @@ public class ShardFetchSearchRequest extends ShardFetchRequest implements Indice private OriginalIndices originalIndices; public ShardFetchSearchRequest() { - } - public ShardFetchSearchRequest(SearchRequest request, long id, IntArrayList list) { - this(request, id, list, null); } public ShardFetchSearchRequest(SearchRequest request, long id, IntArrayList list, ScoreDoc lastEmittedDoc) { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java index 59225f93a61..17f5e5ac705 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java @@ -43,7 +43,9 @@ public final class MatchedQueriesFetchSubPhase implements FetchSubPhase { @Override public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { - if (hits.length == 0) { + if (hits.length == 0 || + // in case the request has only suggest, parsed query is null + context.parsedQuery() == null) { return; } hits = hits.clone(); // don't modify the incoming hits diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index 191537b4de5..e1d46dd5fd2 100644 
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -415,8 +415,8 @@ public class InternalSearchHit implements SearchHit { static final String INNER_HITS = "inner_hits"; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // public because we render hit as part of completion suggestion option + public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException { List metaFields = new ArrayList<>(); List otherFields = new ArrayList<>(); if (fields != null && !fields.isEmpty()) { @@ -432,7 +432,6 @@ public class InternalSearchHit implements SearchHit { } } - builder.startObject(); // For inner_hit hits shard is null and that is ok, because the parent search hit has all this information. // Even if this was included in the inner_hit hits this would be the same, so better leave it out. if (explanation() != null && shard != null) { @@ -516,7 +515,6 @@ public class InternalSearchHit implements SearchHit { } builder.endObject(); } - builder.endObject(); return builder; } @@ -533,6 +531,15 @@ public class InternalSearchHit implements SearchHit { builder.endArray(); } builder.endObject(); + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + toInnerXContent(builder, params); + builder.endObject(); + return builder; } public static InternalSearchHit readSearchHit(StreamInput in, InternalSearchHits.StreamContext context) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java index f8fbdaf969e..95612693f8b 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -40,6 +40,7 @@ import 
java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * Top level suggest result, containing the result for each suggestion. @@ -48,18 +49,16 @@ public class Suggest implements Iterable COMPARATOR = new Comparator() { - @Override - public int compare(Option first, Option second) { - int cmp = Float.compare(second.getScore(), first.getScore()); - if (cmp != 0) { - return cmp; - } - return first.getText().compareTo(second.getText()); - } - }; + public static final Comparator