From 7132fcd7ac7274b2b92395af20e80511300c419a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 15 Sep 2016 07:44:05 -0400 Subject: [PATCH 01/25] Give useful error message if log config is missing Today when starting Elasticsearch without a Log4j 2 configuration file, we end up throwing an array index out of bounds exception. This is because we are passing no configuration files to Log4j. Instead, we should throw a useful error message to the user. This commit modifies the Log4j configuration setup to throw a user exception if no Log4j configuration files are present in the config directory. Relates #20493 --- .../org/elasticsearch/bootstrap/Bootstrap.java | 3 ++- .../elasticsearch/bootstrap/Elasticsearch.java | 2 +- .../common/logging/LogConfigurator.java | 11 ++++++++++- .../logging/EvilLoggerConfigurationTests.java | 16 +++++++++++++++- .../common/logging/EvilLoggerTests.java | 11 ++++++----- .../logging/does_not_exist/nothing_to_see_here | 0 6 files changed, 34 insertions(+), 9 deletions(-) create mode 100644 qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/does_not_exist/nothing_to_see_here diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 348fd213e9b..b3ecf34b3d2 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -28,6 +28,7 @@ import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.PidFile; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.CreationException; @@ -233,7 +234,7 @@ final class Bootstrap { final boolean foreground, final Path pidFile, final boolean quiet, - final Map esSettings) throws BootstrapException, 
NodeValidationException { + final Map esSettings) throws BootstrapException, NodeValidationException, UserException { // Set the system property before anything has a chance to trigger its use initLoggerPrefix(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 43160ee8c9b..046f4a2b4a6 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -107,7 +107,7 @@ class Elasticsearch extends SettingCommand { } void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map esSettings) - throws NodeValidationException { + throws NodeValidationException, UserException { try { Bootstrap.init(!daemonize, pidFile, quiet, esSettings); } catch (BootstrapException | RuntimeException e) { diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index d990a28ea46..b0c1d094e19 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -30,6 +30,8 @@ import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration; import org.apache.logging.log4j.core.config.composite.CompositeConfiguration; import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration; import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; @@ -50,7 +52,7 @@ import java.util.Set; public class LogConfigurator { - public static void configure(final Environment environment, final boolean resolveConfig) 
throws IOException { + public static void configure(final Environment environment, final boolean resolveConfig) throws IOException, UserException { final Settings settings = environment.settings(); setLogConfigurationSystemProperty(environment, settings); @@ -75,6 +77,13 @@ public class LogConfigurator { return FileVisitResult.CONTINUE; } }); + + if (configurations.isEmpty()) { + throw new UserException( + ExitCodes.CONFIG, + "no log4j2.properties found; tried [" + environment.configFile() + "] and its subdirectories"); + } + context.start(new CompositeConfiguration(configurations)); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index 40759f29f68..54bfae87373 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -27,6 +27,7 @@ import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.core.config.LoggerConfig; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -34,7 +35,9 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.nio.file.Path; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.notNullValue; public class EvilLoggerConfigurationTests extends ESTestCase { @@ -85,7 +88,7 @@ public class EvilLoggerConfigurationTests extends ESTestCase { } } - public void testDefaults() throws IOException { + public void 
testDefaults() throws IOException, UserException { final Path configDir = getDataPath("config"); final String level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR).toString(); final Settings settings = Settings.builder() @@ -137,4 +140,15 @@ public class EvilLoggerConfigurationTests extends ESTestCase { assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level)); } + public void testMissingConfigFile() { + final Path configDir = getDataPath("does_not_exist"); + final Settings settings = Settings.builder() + .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + final Environment environment = new Environment(settings); + UserException e = expectThrows(UserException.class, () -> LogConfigurator.configure(environment, true)); + assertThat(e, hasToString(containsString("no log4j2.properties found; tried"))); + } + } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 4d7d450bb18..8a0f1b426b0 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -28,6 +28,7 @@ import org.apache.logging.log4j.core.appender.ConsoleAppender; import org.apache.logging.log4j.core.appender.CountingNoOpAppender; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -59,7 +60,7 @@ public class EvilLoggerTests extends ESTestCase { super.tearDown(); } - public void testLocationInfoTest() throws IOException { + public void 
testLocationInfoTest() throws IOException, UserException { setupLogging("location_info"); final Logger testLogger = ESLoggerFactory.getLogger("test"); @@ -81,7 +82,7 @@ public class EvilLoggerTests extends ESTestCase { assertLogLine(events.get(4), Level.TRACE, location, "This is a trace message"); } - public void testDeprecationLogger() throws IOException { + public void testDeprecationLogger() throws IOException, UserException { setupLogging("deprecation"); final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger("deprecation")); @@ -97,7 +98,7 @@ public class EvilLoggerTests extends ESTestCase { "This is a deprecation message"); } - public void testFindAppender() throws IOException { + public void testFindAppender() throws IOException, UserException { setupLogging("find_appender"); final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender"); @@ -111,7 +112,7 @@ public class EvilLoggerTests extends ESTestCase { assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op")); } - public void testPrefixLogger() throws IOException, IllegalAccessException { + public void testPrefixLogger() throws IOException, IllegalAccessException, UserException { setupLogging("prefix"); final String prefix = randomBoolean() ? 
null : randomAsciiOfLength(16); @@ -179,7 +180,7 @@ public class EvilLoggerTests extends ESTestCase { } } - private void setupLogging(final String config) throws IOException { + private void setupLogging(final String config) throws IOException, UserException { final Path configDir = getDataPath(config); // need to set custom path.conf so we can use a custom log4j2.properties file for the test final Settings settings = Settings.builder() diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/does_not_exist/nothing_to_see_here b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/does_not_exist/nothing_to_see_here new file mode 100644 index 00000000000..e69de29bb2d From b03c8073688a1a93b04cc207aa4345c5f858c059 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Thu, 15 Sep 2016 09:53:44 -0400 Subject: [PATCH 02/25] Rename service.bat to elasticsearch-service.bat (#20496) Closes gh-17528. --- .../org/elasticsearch/bootstrap/Elasticsearch.java | 3 ++- .../bin/{service.bat => elasticsearch-service.bat} | 2 +- docs/reference/setup/install/windows.asciidoc | 14 +++++++------- 3 files changed, 10 insertions(+), 9 deletions(-) rename distribution/src/main/resources/bin/{service.bat => elasticsearch-service.bat} (99%) diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 046f4a2b4a6..b4ec024b9e8 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -123,7 +123,8 @@ class Elasticsearch extends SettingCommand { * * http://commons.apache.org/proper/commons-daemon/procrun.html * - * NOTE: If this method is renamed and/or moved, make sure to update service.bat! + * NOTE: If this method is renamed and/or moved, make sure to + * update elasticsearch-service.bat! 
*/ static void close(String[] args) throws IOException { Bootstrap.stop(); diff --git a/distribution/src/main/resources/bin/service.bat b/distribution/src/main/resources/bin/elasticsearch-service.bat similarity index 99% rename from distribution/src/main/resources/bin/service.bat rename to distribution/src/main/resources/bin/elasticsearch-service.bat index 1d62d81e764..609b8bda846 100644 --- a/distribution/src/main/resources/bin/service.bat +++ b/distribution/src/main/resources/bin/elasticsearch-service.bat @@ -75,7 +75,7 @@ echo Unknown option "%SERVICE_CMD%" :displayUsage echo. -echo Usage: service.bat install^|remove^|start^|stop^|manager [SERVICE_ID] +echo Usage: elasticsearch-service.bat install^|remove^|start^|stop^|manager [SERVICE_ID] goto:eof :doStart diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index ef0c5f2a71f..5fbb147edf4 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -2,7 +2,7 @@ === Install Elasticsearch on Windows Elasticsearch can be installed on Windows using the `.zip` package. This -comes with a `service.bat` command which will setup Elasticsearch to run as a +comes with a `elasticsearch-service.bat` command which will setup Elasticsearch to run as a service. The latest stable version of Elasticsearch can be found on the @@ -13,7 +13,7 @@ link:/downloads/past-releases[Past Releases page]. [[install-windows]] ==== Download and install the `.zip` package -Download the `.zip` archive for Elastisearch v{version} from: https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip +Download the `.zip` archive for Elasticsearch v{version} from: https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip Unzip it with your favourite unzip tool. 
This will create a folder called +elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal @@ -65,7 +65,7 @@ include::check-running.asciidoc[] Elasticsearch can be installed as a service to run in the background or start automatically at boot time without any user interaction. This can be achieved -through the `service.bat` script in the `bin\` folder which allows one to +through the `elasticsearch-service.bat` script in the `bin\` folder which allows one to install, remove, manage or configure the service and potentially start and stop the service, all from the command-line. @@ -73,7 +73,7 @@ stop the service, all from the command-line. -------------------------------------------------- c:\elasticsearch-{version}{backslash}bin>service -Usage: service.bat install|remove|start|stop|manager [SERVICE_ID] +Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] -------------------------------------------------- The script requires one parameter (the command to execute) followed by an @@ -170,18 +170,18 @@ The Elasticsearch service can be configured prior to installation by setting the The timeout in seconds that procrun waits for service to exit gracefully. Defaults to `0`. -NOTE: At its core, `service.bat` relies on http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project +NOTE: At its core, `elasticsearch-service.bat` relies on http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project to install the service. Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled. NOTE: On Windows, the <> can be configured as for any other Elasticsearch installation when running Elasticsearch from the command line, or when installing Elasticsearch as a service for the first time. 
To adjust the heap size for an already installed service, -use the service manager: `bin\service.bat manager`. +use the service manager: `bin\elasticsearch-service.bat manager`. Using the Manager GUI:: -It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `service.bat manager` from the command-line will open up the manager window: +It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `elasticsearch-service.bat manager` from the command-line will open up the manager window: image::images/service-manager-win.png["Windows Service Manager GUI",align="center"] From 37489c3274b300b0c9ca18ba9a5be86655bf1d6c Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 15 Sep 2016 16:25:17 +0200 Subject: [PATCH 03/25] Add clusterUUID to RestMainAction output (#20503) Add clusterUUID to RestMainAction output GET / now returns the clusterUUID as well as part of its output for monitoring purposes --- .../action/main/MainResponse.java | 11 +++++++- .../action/main/TransportMainAction.java | 4 +-- .../action/main/MainActionTests.java | 25 +++++++++++++------ .../rest/action/RestMainActionTests.java | 8 +++--- docs/plugins/discovery-azure-classic.asciidoc | 2 ++ .../setup/install/check-running.asciidoc | 2 ++ .../reindex/remote/RemoteResponseParsers.java | 1 + .../rest-api-spec/test/info/10_info.yaml | 1 + 8 files changed, 39 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/main/MainResponse.java b/core/src/main/java/org/elasticsearch/action/main/MainResponse.java index 2403c3ee49c..c156dcfc98f 100644 
--- a/core/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/core/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -35,16 +35,18 @@ public class MainResponse extends ActionResponse implements ToXContent { private String nodeName; private Version version; private ClusterName clusterName; + private String clusterUuid; private Build build; private boolean available; MainResponse() { } - public MainResponse(String nodeName, Version version, ClusterName clusterName, Build build, boolean available) { + public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build, boolean available) { this.nodeName = nodeName; this.version = version; this.clusterName = clusterName; + this.clusterUuid = clusterUuid; this.build = build; this.available = available; } @@ -61,6 +63,10 @@ public class MainResponse extends ActionResponse implements ToXContent { return clusterName; } + public String getClusterUuid() { + return clusterUuid; + } + public Build getBuild() { return build; } @@ -75,6 +81,7 @@ public class MainResponse extends ActionResponse implements ToXContent { out.writeString(nodeName); Version.writeVersion(version, out); clusterName.writeTo(out); + out.writeString(clusterUuid); Build.writeBuild(build, out); out.writeBoolean(available); } @@ -85,6 +92,7 @@ public class MainResponse extends ActionResponse implements ToXContent { nodeName = in.readString(); version = Version.readVersion(in); clusterName = new ClusterName(in); + clusterUuid = in.readString(); build = Build.readBuild(in); available = in.readBoolean(); } @@ -94,6 +102,7 @@ public class MainResponse extends ActionResponse implements ToXContent { builder.startObject(); builder.field("name", nodeName); builder.field("cluster_name", clusterName.value()); + builder.field("cluster_uuid", clusterUuid); builder.startObject("version") .field("number", version.toString()) .field("build_hash", build.shortHash()) diff --git 
a/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java b/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java index c37268a52de..368696a9553 100644 --- a/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java +++ b/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java @@ -52,7 +52,7 @@ public class TransportMainAction extends HandledTransportAction params = new HashMap<>(); diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index 0feb5f7f8e5..e9ad039d387 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -391,6 +391,7 @@ This command should give you a JSON result: { "name" : "Cp8oag6", "cluster_name" : "elasticsearch", + "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", "version" : { "number" : "{version}", "build_hash" : "f27399d", @@ -403,6 +404,7 @@ This command should give you a JSON result: -------------------------------------------- // TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/] // TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/] +// TESTRESPONSE[s/"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",/"cluster_uuid" : "$body.cluster_uuid",/] // TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/] // TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/] // TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/] diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc index 2e255ec35e5..a3ba7fc9123 100644 --- a/docs/reference/setup/install/check-running.asciidoc +++ b/docs/reference/setup/install/check-running.asciidoc @@ -16,6 +16,7 @@ which should give you a response something like this: { "name" : "Cp8oag6", "cluster_name" : "elasticsearch", + 
"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", "version" : { "number" : "{version}", "build_hash" : "f27399d", @@ -28,6 +29,7 @@ which should give you a response something like this: -------------------------------------------- // TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/] // TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/] +// TESTRESPONSE[s/"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",/"cluster_uuid" : "$body.cluster_uuid",/] // TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/] // TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/] // TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/] diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java index 3ae1f33df54..1ee96f27c8d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java @@ -294,6 +294,7 @@ final class RemoteResponseParsers { MAIN_ACTION_PARSER.declareInt((p, v) -> {}, new ParseField("status")); MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name")); MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_name")); + MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_uuid")); MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name")); MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("tagline")); MAIN_ACTION_PARSER.declareObject(constructorArg(), VERSION_PARSER, new ParseField("version")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml index 
d46ec7ee2ab..d0c99ee0a7c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/info/10_info.yaml @@ -3,6 +3,7 @@ - do: {info: {}} - is_true: name - is_true: cluster_name + - is_true: cluster_uuid - is_true: tagline - is_true: version - is_true: version.number From e4c80c94e94deb759cf245daf725dfd80086b74a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 15 Sep 2016 11:57:05 -0400 Subject: [PATCH 04/25] Convert more search docs to CONSOLE `profile.asciidoc` now runs all of its command but it doesn't validate all of the results. Writing the validation is time consuming so I only did some of it. --- docs/reference/search/profile.asciidoc | 174 +++++++++++++------- docs/reference/search/request/sort.asciidoc | 18 +- 2 files changed, 129 insertions(+), 63 deletions(-) diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index e2c22caf6f4..0a03b322858 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -17,13 +17,17 @@ Any `_search` request can be profiled by adding a top-level `profile` parameter: [source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/_search' -d '{ +GET /_search +{ "profile": true,<1> "query" : { - "match" : { "message" : "search test" } + "match" : { "message" : "message number" } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + <1> Setting the top-level `profile` parameter to `true` will enable profiling for the search @@ -40,70 +44,70 @@ This will yield the following result: "failed": 0 }, "hits": { - "total": 3, - "max_score": 1.078072, - "hits": [ ... ] <1> + "total": 4, + "max_score": 0.5093388, + "hits": [...] 
<1> }, "profile": { "shards": [ { - "id": "[2aE02wS1R8q_QFnYu6vDVQ][test][1]", + "id": "[2aE02wS1R8q_QFnYu6vDVQ][twitter][1]", "searches": [ { "query": [ { "type": "BooleanQuery", - "description": "message:search message:test", - "time": "15.52889800ms", + "description": "message:message message:number", + "time": "1.873811000ms", "breakdown": { - "score": 6352, - "score_count": 1, - "build_scorer": 1800776, + "score": 51306, + "score_count": 4, + "build_scorer": 2935582, "build_scorer_count": 1, "match": 0, "match_count": 0, - "create_weight": 667400, + "create_weight": 919297, "create_weight_count": 1, - "next_doc": 10563, - "next_doc_count": 2, + "next_doc": 53876, + "next_doc_count": 5, "advance": 0, "advance_count": 0 }, "children": [ { "type": "TermQuery", - "description": "message:search", - "time": "4.938855000ms", + "description": "message:message", + "time": "0.3919430000ms", "breakdown": { - "score": 0, - "score_count": 0, - "build_scorer": 3230, + "score": 28776, + "score_count": 4, + "build_scorer": 784451, "build_scorer_count": 1, "match": 0, "match_count": 0, - "create_weight": 415612, + "create_weight": 1669564, "create_weight_count": 1, - "next_doc": 0, - "next_doc_count": 0, + "next_doc": 10111, + "next_doc_count": 5, "advance": 0, "advance_count": 0 } }, { "type": "TermQuery", - "description": "message:test", - "time": "0.5016660000ms", + "description": "message:number", + "time": "0.2106820000ms", "breakdown": { - "score": 5014, - "score_count": 1, - "build_scorer": 1689333, + "score": 4552, + "score_count": 4, + "build_scorer": 42602, "build_scorer_count": 1, "match": 0, "match_count": 0, - "create_weight": 166587, + "create_weight": 89323, "create_weight_count": 1, - "next_doc": 5542, - "next_doc_count": 2, + "next_doc": 2852, + "next_doc_count": 5, "advance": 0, "advance_count": 0 } @@ -111,21 +115,44 @@ This will yield the following result: ] } ], - "rewrite_time": 870954, + "rewrite_time": 51443, "collector": [ { "name": 
"SimpleTopScoreDocCollector", "reason": "search_top_hits", - "time": "0.009783000000ms" + "time": "0.06989100000ms" } ] } - ] + ], + "aggregations": [] } ] } } -------------------------------------------------- +// TESTRESPONSE[s/"took": 25/"took": $body.took/] +// TESTRESPONSE[s/"hits": \[...\]/"hits": $body.hits.hits/] +// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/] +// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/] +// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/] +// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/] +// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/] +// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/] +// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/] +// TESTRESPONSE[s/"time": "0.3919430000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.0.time/] +// TESTRESPONSE[s/"score": 28776/"score": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.score/] +// TESTRESPONSE[s/"build_scorer": 784451/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.build_scorer/] +// TESTRESPONSE[s/"create_weight": 1669564/"create_weight": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.create_weight/] +// TESTRESPONSE[s/"next_doc": 10111/"next_doc": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.next_doc/] +// TESTRESPONSE[s/"time": "0.2106820000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.1.time/] +// TESTRESPONSE[s/"score": 4552/"score": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.score/] +// TESTRESPONSE[s/"build_scorer": 
42602/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.build_scorer/] +// TESTRESPONSE[s/"create_weight": 89323/"create_weight": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.create_weight/] +// TESTRESPONSE[s/"next_doc": 2852/"next_doc": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.next_doc/] +// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] +// Sorry for this mess.... + <1> Search results are returned, but were omitted here for brevity Even for a simple query, the response is relatively complicated. Let's break it down piece-by-piece before moving @@ -139,11 +166,11 @@ First, the overall structure of the profile response is as follows: "profile": { "shards": [ { - "id": "[2aE02wS1R8q_QFnYu6vDVQ][test][1]", <1> + "id": "[2aE02wS1R8q_QFnYu6vDVQ][twitter][1]", <1> "searches": [ { "query": [...], <2> - "rewrite_time": 870954, <3> + "rewrite_time": 51443, <3> "collector": [...] 
<4> } ], @@ -153,6 +180,12 @@ First, the overall structure of the profile response is as follows: } } -------------------------------------------------- +// TESTRESPONSE[s/"profile": /"took": $body.took, "timed_out": $body.timed_out, "_shards": $body._shards, "hits": $body.hits, "profile": /] +// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/] +// TESTRESPONSE[s/"query": \[...\]/"query": $body.profile.shards.0.searches.0.query/] +// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/] +// TESTRESPONSE[s/"collector": \[...\]/"collector": $body.profile.shards.0.searches.0.collector/] +// TESTRESPONSE[s/"aggregations": \[...\]/"aggregations": []/] <1> A profile is returned for each shard that participated in the response, and is identified by a unique ID <2> Each profile contains a section which holds details about the query execution @@ -195,33 +228,38 @@ the `advance` phase of that query is the cause, for example. The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard. The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly (or sometimes very) different. It will also use similar but not always identical naming. 
Using our previous -`term` query example, let's analyze the `query` section: +`match` query example, let's analyze the `query` section: [source,js] -------------------------------------------------- "query": [ { "type": "BooleanQuery", - "description": "message:search message:test", - "time": "15.52889800ms", + "description": "message:message message:number", + "time": "1.873811000ms", "breakdown": {...}, <1> "children": [ { "type": "TermQuery", - "description": "message:search", - "time": "4.938855000ms", + "description": "message:message", + "time": "0.3919430000ms", "breakdown": {...} }, { "type": "TermQuery", - "description": "message:test", - "time": "0.5016660000ms", + "description": "message:number", + "time": "0.2106820000ms", "breakdown": {...} } ] } ] -------------------------------------------------- +// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n/] +// TESTRESPONSE[s/]$/],"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/] +// TESTRESPONSE[s/"time": "1.873811000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.breakdown/] +// TESTRESPONSE[s/"time": "0.3919430000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.0.breakdown/] +// TESTRESPONSE[s/"time": "0.2106820000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.1.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.1.breakdown/] <1> The breakdown timings are omitted for simplicity Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two @@ 
-245,20 +283,27 @@ The `"breakdown"` component lists detailed timing statistics about low-level Luc [source,js] -------------------------------------------------- "breakdown": { - "score": 5014, - "score_count": 1, - "build_scorer": 1689333, - "build_scorer_count": 1, - "match": 0, - "match_count": 0, - "create_weight": 166587, - "create_weight_count": 1, - "next_doc": 5542, - "next_doc_count": 2, - "advance": 0, - "advance_count": 0 + "score": 51306, + "score_count": 4, + "build_scorer": 2935582, + "build_scorer_count": 1, + "match": 0, + "match_count": 0, + "create_weight": 919297, + "create_weight_count": 1, + "next_doc": 53876, + "next_doc_count": 5, + "advance": 0, + "advance_count": 0 } -------------------------------------------------- +// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:message message:number",\n"time": $body.profile.shards.0.searches.0.query.0.time,/] +// TESTRESPONSE[s/}$/},\n"children": $body.profile.shards.0.searches.0.query.0.children}],\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/] +// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/] +// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/] +// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/] +// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/] +// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/] Timings are listed in wall-clock nanoseconds and are not normalized at all. 
All caveats about the overall `"time"` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is @@ -348,10 +393,13 @@ Looking at the previous example: { "name": "SimpleTopScoreDocCollector", "reason": "search_top_hits", - "time": "2.206529000ms" + "time": "0.06989100000ms" } ] -------------------------------------------------- +// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": $body.profile.shards.0.searches.0.query,\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time,/] +// TESTRESPONSE[s/]$/]}], "aggregations": []}]}}/] +// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] We see a single collector named `SimpleTopScoreDocCollector`. This is the default "scoring and sorting" Collector used by Elasticsearch. The `"reason"` field attempts to give a plain english description of the class name. 
The @@ -473,6 +521,8 @@ GET /test/_search } } -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT test\n/] This example has: @@ -509,7 +559,7 @@ And the response: "create_weight_count": 1, "build_scorer": 377872, "build_scorer_count": 1, - "advance": 0 + "advance": 0, "advance_count": 0 } }, @@ -528,7 +578,7 @@ And the response: "create_weight_count": 1, "build_scorer": 112551, "build_scorer_count": 1, - "advance": 0 + "advance": 0, "advance_count": 0 } } @@ -578,7 +628,7 @@ And the response: "create_weight_count": 1, "build_scorer": 38310, "build_scorer_count": 1, - "advance": 0 + "advance": 0, "advance_count": 0 } } @@ -640,7 +690,7 @@ the following example aggregations request: [source,js] -------------------------------------------------- -curl -XGET "http://localhost:9200/house-prices/_search" -d' +GET /house-prices/_search { "profile": true, "size": 0, @@ -658,8 +708,10 @@ curl -XGET "http://localhost:9200/house-prices/_search" -d' } } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT house-prices\n/] Which yields the following aggregation profile output diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index 85c5d1e675f..d3bbe283a2d 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -351,9 +351,23 @@ Multiple geo points can be passed as an array containing any `geo_point` format, [source,js] -------------------------------------------------- -"pin.location" : [[-70, 40], [-71, 42]] -"pin.location" : [{"lat": 40, "lon": -70}, {"lat": 42, "lon": -71}] +GET /_search +{ + "sort" : [ + { + "_geo_distance" : { + "pin.location" : [[-70, 40], [-71, 42]], + "order" : "asc", + "unit" : "km" + } + } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} -------------------------------------------------- +// CONSOLE and so forth. 
From 16ed2fb423fd387faa513cbed991a9fcb64bc572 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 15 Sep 2016 14:28:02 -0400 Subject: [PATCH 05/25] [TEST] ensure context filtering with valid utf-8 characters work with completion suggester --- .../ContextCompletionSuggestSearchIT.java | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 0901a6201a2..fa94eabeb53 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -21,16 +21,16 @@ package org.elasticsearch.search.suggest; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.suggest.CompletionSuggestSearchIT.CompletionMappingBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; import org.elasticsearch.search.suggest.completion.context.CategoryContextMapping; @@ -40,7 +40,6 @@ import org.elasticsearch.search.suggest.completion.context.ContextMapping; import 
org.elasticsearch.search.suggest.completion.context.GeoContextMapping; import org.elasticsearch.search.suggest.completion.context.GeoQueryContext; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -54,6 +53,8 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.core.IsEqual.equalTo; @SuppressCodecs("*") // requires custom completion format public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { @@ -160,6 +161,27 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { assertSuggestions("foo", prefix, "sugxgestion9", "sugxgestion8", "sugxgestion7", "sugxgestion6", "sugxgestion5"); } + public void testContextFilteringWorksWithUTF8Categories() throws Exception { + CategoryContextMapping contextMapping = ContextBuilder.category("cat").field("cat").build(); + LinkedHashMap map = new LinkedHashMap<>(Collections.singletonMap("cat", contextMapping)); + final CompletionMappingBuilder mapping = new CompletionMappingBuilder().context(map); + createIndexAndMapping(mapping); + IndexResponse indexResponse = client().prepareIndex(INDEX, TYPE, "1") + .setSource(jsonBuilder().startObject() + .startObject(FIELD) + .field("input", "suggestion") + .endObject() + .field("cat", "ctx\\u00e4") + .endObject()) + .get(); + assertThat(indexResponse.status(), equalTo(RestStatus.CREATED)); + assertNoFailures(client().admin().indices().prepareRefresh(INDEX).get()); + CompletionSuggestionBuilder contextSuggestQuery = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("cat", + Collections.singletonList(CategoryQueryContext.builder().setCategory("ctx\\u00e4").build()))); + 
assertSuggestions("foo", contextSuggestQuery, "suggestion"); + } + public void testSingleContextFiltering() throws Exception { CategoryContextMapping contextMapping = ContextBuilder.category("cat").field("cat").build(); LinkedHashMap map = new LinkedHashMap(Collections.singletonMap("cat", contextMapping)); From d0be96df7b3d48ce1432f5557ad1ede3397a51f9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 14 Sep 2016 17:04:51 -0400 Subject: [PATCH 06/25] Clean up snapshots after each REST test The only repository we can be sure is safe to clean is `fs` so we clean any snapshots in those repositories after each test. Other repositories like url and azure tend to throw exceptions rather than let us fetch their contents during the REST test. So we clean what we can.... Closes #18159 --- .../test/snapshot.get/10_basic.yaml | 11 ++----- .../test/snapshot.status/10_basic.yaml | 11 ++----- .../test/rest/ESRestTestCase.java | 32 +++++++++++++++++-- 3 files changed, 33 insertions(+), 21 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml index bd609e3e3bf..24a7ac6adc6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yaml @@ -9,13 +9,6 @@ setup: settings: location: "test_repo_get_1_loc" ---- -teardown: - - - do: - snapshot.delete_repository: - repository: test_repo_get_1 - --- "Get snapshot info": @@ -39,7 +32,7 @@ teardown: snapshot: test_snapshot - is_true: snapshots - + --- "Get missing snapshot info throws an exception": @@ -48,7 +41,7 @@ teardown: snapshot.get: repository: test_repo_get_1 snapshot: test_nonexistent_snapshot - + --- "Get missing snapshot info succeeds when ignoreUnavailable is true": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml index d4548553e25..838c1264974 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yaml @@ -9,13 +9,6 @@ setup: settings: location: "test_repo_status_1_loc" ---- -teardown: - - - do: - snapshot.delete_repository: - repository: test_repo_status_1 - --- "Get snapshot status": @@ -39,7 +32,7 @@ teardown: snapshot: test_snapshot - is_true: snapshots - + --- "Get missing snapshot status throws an exception": @@ -48,7 +41,7 @@ teardown: snapshot.status: repository: test_repo_status_1 snapshot: test_nonexistent_snapshot - + --- "Get missing snapshot status succeeds when ignoreUnavailable is true": diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 573c301105a..fdce0248ed3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -55,6 +55,7 @@ import java.util.Set; import javax.net.ssl.SSLContext; +import static java.util.Collections.singletonMap; import static java.util.Collections.sort; import static java.util.Collections.unmodifiableList; @@ -151,9 +152,34 @@ public class ESRestTestCase extends ESTestCase { // wipe index templates adminClient().performRequest("DELETE", "_template/*"); - // wipe snapshots - // Technically this deletes all repositories and leave the snapshots in the repository. OK. - adminClient().performRequest("DELETE", "_snapshot/*"); + wipeSnapshots(); + } + + /** + * Wipe fs snapshots we created one by one and all repositories so that the next test can create the repositories fresh and they'll + * start empty. There isn't an API to delete all snapshots. 
There is an API to delete all snapshot repositories but that leaves all of + * the snapshots intact in the repository. + */ + private void wipeSnapshots() throws IOException { + for (Map.Entry repo : entityAsMap(adminClient.performRequest("GET", "_snapshot/_all")).entrySet()) { + String repoName = repo.getKey(); + Map repoSpec = (Map) repo.getValue(); + String repoType = (String) repoSpec.get("type"); + if (repoType.equals("fs")) { + // All other repo types we really don't have a chance of being able to iterate properly, sadly. + String url = "_snapshot/" + repoName + "/_all"; + Map params = singletonMap("ignore_unavailable", "true"); + List snapshots = (List) entityAsMap(adminClient.performRequest("GET", url, params)).get("snapshots"); + for (Object snapshot : snapshots) { + Map snapshotInfo = (Map) snapshot; + String name = (String) snapshotInfo.get("snapshot"); + logger.debug("wiping snapshot [{}/{}]", repoName, name); + adminClient().performRequest("DELETE", "_snapshot/" + repoName + "/" + name); + } + } + logger.debug("wiping snapshot repository [{}]", repoName); + adminClient().performRequest("DELETE", "_snapshot/" + repoName); + } } /** From 2233d48235aeb57dbcfb8c2ab86fdd502efb971e Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 15 Sep 2016 21:40:09 +0200 Subject: [PATCH 07/25] add a reduced TCP_CONNECT_TIMEOUT setting to DiscoveryWithServiceDisruptionsIT The default of 30s causes some tests to timeout when running ensureGreen and similar. This is because network delays simulation blocks connect until either the connect timeout expires or the disruption configured time stops. We do *not* immediately connect when the disruption is stopped. 
--- .../discovery/DiscoveryWithServiceDisruptionsIT.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index d0afcde265d..f04db89796c 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -82,6 +82,7 @@ import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.disruption.SlowClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -183,6 +184,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly + .put(TcpTransport.TCP_CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this + // value and the time of disruption and does not recover immediately + // when disruption is stopped. 
We should make sure we recover faster + than the default of 30s, causing ensureGreen and friends to time out + .build(); @Override From bdad62a2f8fe1c1455656471e769df11c067cbf6 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Thu, 15 Sep 2016 16:32:28 -0400 Subject: [PATCH 08/25] Fix silently accepting malformed queries Currently, we silently accept malformed query where more than one key is defined at the top-level for query object. If all the keys have a valid query body, only the last query is executed, besides throwing off parsing for additional suggest, aggregation or highlighting defined in the search request. This commit throws a parsing exception when we encounter a query with multiple keys. closes #20500 --- .../index/query/QueryParseContext.java | 4 ++++ .../index/query/BoolQueryBuilderTests.java | 2 +- .../FunctionScoreQueryBuilderTests.java | 2 +- .../builder/SearchSourceBuilderTests.java | 22 +++++++++++++++++++ 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 9ed374db212..7b5fa97825f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -130,6 +130,10 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]"); } parser.nextToken(); + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + throw new ParsingException(parser.getTokenLocation(), + "[" + queryName + "] malformed query, unexpected [FIELD_NAME] found [" + parser.currentName() + "]"); + } return result; } diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 
5e911261171..5f9c7e0881d 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -370,7 +370,7 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase parseQuery(query, ParseFieldMatcher.EMPTY)); - assertEquals("expected [END_OBJECT] but got [FIELD_NAME], possibly too many query clauses", ex.getMessage()); + assertEquals("[match] malformed query, unexpected [FIELD_NAME] found [match]", ex.getMessage()); } public void testRewrite() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index c5b31eb6a92..c280bc4a49e 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -715,7 +715,7 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase messageMatcher) { diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 4d2b0d394a8..fe324190106 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.builder; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -422,6 +423,27 @@ public class SearchSourceBuilderTests extends 
ESTestCase { } } + public void testInvalid() throws Exception { + String restContent = " { \"query\": {\n" + + " \"multi_match\": {\n" + + " \"query\": \"workd\",\n" + + " \"fields\": [\"title^5\", \"plain_body\"]\n" + + " },\n" + + " \"filters\": {\n" + + " \"terms\": {\n" + + " \"status\": [ 3 ]\n" + + " }\n" + + " }\n" + + " } }"; + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + SearchSourceBuilder.fromXContent(createParseContext(parser), + searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers); + fail("invalid query syntax multiple keys under query"); + } catch (ParsingException e) { + assertThat(e.getMessage(), containsString("filters")); + } + } + public void testParseSort() throws IOException { { String restContent = " { \"sort\": \"foo\"}"; From 1e2ef192c93fa0216dc9c1133d3bd47134065b33 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 15 Sep 2016 22:54:47 +0200 Subject: [PATCH 09/25] [TEST] Reduce the number of docs per indexRandom in FieldSortIT#testIssue6614 --- .../java/org/elasticsearch/search/sort/FieldSortIT.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java index 6d1c64437d6..84dd3dabf6b 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -126,6 +126,7 @@ public class FieldSortIT extends ESIntegTestCase { List builders = new ArrayList<>(); boolean strictTimeBasedIndices = randomBoolean(); final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month + int docs = 0; for (int i = 0; i < numIndices; i++) { final String indexId = strictTimeBasedIndices ? 
"idx_" + i : "idx"; if (strictTimeBasedIndices || i == 0) { @@ -141,9 +142,10 @@ public class FieldSortIT extends ESIntegTestCase { String.format(Locale.ROOT, "%02d", j+1) + ":00:00")); } + indexRandom(true, builders); + docs += builders.size(); + builders.clear(); } - int docs = builders.size(); - indexRandom(true, builders); SearchResponse allDocsResponse = client().prepareSearch().setQuery( QueryBuilders.boolQuery().must(QueryBuilders.termQuery("foo", "bar")).must( QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01"))) From 9b17242b5d9c931177181bcec409899f7f48316e Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Wed, 14 Sep 2016 15:56:46 -0400 Subject: [PATCH 10/25] [DOC] Add note for full cluster restart for installing plugins using custom metadata Currently, we check if a node has the same set of custom metadata as the master before joining the cluster. This implies freshly installing a plugin that has its custom metadata requires a full cluster restart. --- docs/plugins/index.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index 9dd8045746f..94f36a95c9d 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -16,6 +16,10 @@ Plugins contain JAR files, but may also contain scripts and config files, and must be installed on every node in the cluster. After installation, each node must be restarted before the plugin becomes visible. +NOTE: A full cluster restart is required for installing plugins that have +custom cluster state metadata, such as X-Pack. It is still possible to upgrade +such plugins with a rolling restart. 
+ This documentation distinguishes two categories of plugins: Core Plugins:: This category identifies plugins that are part of Elasticsearch From 577dcb32374cf2528a5deadf0a57f1ea1d5a9cbd Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 15 Sep 2016 23:39:11 +0200 Subject: [PATCH 11/25] Add current cluster state version to zen pings and use them in master election (#20384) During a networking partition, cluster states updates (like mapping changes or shard assignments) are committed if a majority of the masters node received the update correctly. This means that the current master has access to enough nodes in the cluster to continue to operate correctly. When the network partition heals, the isolated nodes catch up with the current state and get the changes they couldn't receive before. However, if a second partition happens while the cluster is still recovering from the previous one *and* the old master is put in the minority side, it may be that a new master is elected which did not yet catch up. If that happens, cluster state updates can be lost. This commit fixed 95% of this rare problem by adding the current cluster state version to `PingResponse` and use them when deciding which master to join (and thus casting the node's vote). Note: this doesn't fully mitigate the problem as a cluster state update which is issued concurrently with a network partition can be lost if the partition prevents the commit message (part of the two phased commit of cluster state updates) from reaching any single node in the majority side *and* the partition does allow for the master to acknowledge the change. We are working on a more comprehensive fix but that requires considerate work and is targeted at 6.0. 
--- .../common/settings/ClusterSettings.java | 2 +- .../discovery/DiscoveryModule.java | 3 +- .../zen/{elect => }/ElectMasterService.java | 141 ++++++++++++------ .../discovery/zen/NodeJoinController.java | 1 - .../discovery/zen/ZenDiscovery.java | 97 ++++-------- .../zen/ping/PingContextProvider.java | 5 +- .../discovery/zen/ping/ZenPing.java | 72 +++++---- .../discovery/zen/ping/ZenPingService.java | 76 +++------- .../zen/ping/unicast/UnicastZenPing.java | 13 +- .../master/IndexingMasterFailoverIT.java | 2 +- .../cluster/MinimumMasterNodesIT.java | 2 +- .../allocation/AwarenessAllocationIT.java | 2 +- .../discovery/DiscoveryModuleTests.java | 5 +- .../DiscoveryWithServiceDisruptionsIT.java | 76 ++++++++-- .../zen/ElectMasterServiceTests.java | 89 +++++++---- .../zen/NodeJoinControllerTests.java | 1 - ...eRemovalClusterStateTaskExecutorTests.java | 1 - .../discovery/zen/ZenDiscoveryIT.java | 44 ------ .../discovery/zen/ZenDiscoveryUnitTests.java | 8 +- .../discovery/zen/ZenPingTests.java | 15 +- ...enPingIT.java => UnicastZenPingTests.java} | 47 +++--- .../gateway/GatewayIndexStateIT.java | 2 +- .../gateway/QuorumGatewayIT.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 2 +- docs/resiliency/index.asciidoc | 16 ++ .../elasticsearch/test/ESIntegTestCase.java | 104 ++++++------- .../test/InternalTestCluster.java | 10 +- .../test/disruption/NetworkDisruption.java | 12 ++ .../disruption/NetworkDisruptionTests.java | 15 ++ 29 files changed, 488 insertions(+), 377 deletions(-) rename core/src/main/java/org/elasticsearch/discovery/zen/{elect => }/ElectMasterService.java (61%) rename core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/{UnicastZenPingIT.java => UnicastZenPingTests.java} (85%) diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 1ce156b8536..1256021b96e 100644 --- 
a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -54,8 +54,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.env.Environment; diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 040066adeb6..b41316b6534 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.settings.Setting; @@ -27,8 +26,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.discovery.local.LocalDiscovery; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; diff --git 
a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java similarity index 61% rename from core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java rename to core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 3ef9138f933..1d11f5cf0f5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -17,11 +17,10 @@ * under the License. */ -package org.elasticsearch.discovery.zen.elect; +package org.elasticsearch.discovery.zen; import com.carrotsearch.hppc.ObjectContainer; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; @@ -33,9 +32,11 @@ import org.elasticsearch.common.util.CollectionUtils; import java.util.ArrayList; import java.util.Arrays; -import java.util.Comparator; +import java.util.Collection; import java.util.Iterator; import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; /** * @@ -45,17 +46,64 @@ public class ElectMasterService extends AbstractComponent { public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope); - // This is the minimum version a master needs to be on, otherwise it gets ignored - // This is based on the minimum compatible version of the current version this node is on - private final Version minMasterVersion; - private final NodeComparator nodeComparator = new NodeComparator(); - private volatile int minimumMasterNodes; + /** + * a class to encapsulate all the information about a candidate in a master election + * that is needed to decided 
which of the candidates should win + */ + public static class MasterCandidate { + + public static final long UNRECOVERED_CLUSTER_VERSION = -1; + + final DiscoveryNode node; + + final long clusterStateVersion; + + public MasterCandidate(DiscoveryNode node, long clusterStateVersion) { + Objects.requireNonNull(node); + assert clusterStateVersion >= -1 : "got: " + clusterStateVersion; + assert node.isMasterNode(); + this.node = node; + this.clusterStateVersion = clusterStateVersion; + } + + public DiscoveryNode getNode() { + return node; + } + + public long getClusterStateVersion() { + return clusterStateVersion; + } + + @Override + public String toString() { + return "Candidate{" + + "node=" + node + + ", clusterStateVersion=" + clusterStateVersion + + '}'; + } + + /** + * compares two candidates to indicate which is the better master. + * A higher cluster state version is better + * + * @return -1 if c1 is a better candidate, 1 if c2. + */ + public static int compare(MasterCandidate c1, MasterCandidate c2) { + // we explicitly swap c1 and c2 here. the code expects "better" is lower in a sorted + // list, so if c2 has a higher cluster state version, it needs to come first. 
+ int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion); + if (ret == 0) { + ret = compareNodes(c1.getNode(), c2.getNode()); + } + return ret; + } + } + @Inject public ElectMasterService(Settings settings) { super(settings); - this.minMasterVersion = Version.CURRENT.minimumCompatibilityVersion(); this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes); } @@ -69,16 +117,41 @@ public class ElectMasterService extends AbstractComponent { } public boolean hasEnoughMasterNodes(Iterable nodes) { - if (minimumMasterNodes < 1) { - return true; - } int count = 0; for (DiscoveryNode node : nodes) { if (node.isMasterNode()) { count++; } } - return count >= minimumMasterNodes; + return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes); + } + + public boolean hasEnoughCandidates(Collection candidates) { + if (candidates.isEmpty()) { + return false; + } + if (minimumMasterNodes < 1) { + return true; + } + assert candidates.stream().map(MasterCandidate::getNode).collect(Collectors.toSet()).size() == candidates.size() : + "duplicates ahead: " + candidates; + return candidates.size() >= minimumMasterNodes; + } + + /** + * Elects a new master out of the possible nodes, returning it. Returns null + * if no master has been elected. 
+ */ + public MasterCandidate electMaster(Collection candidates) { + assert hasEnoughCandidates(candidates); + List sortedCandidates = new ArrayList<>(candidates); + sortedCandidates.sort(MasterCandidate::compare); + return sortedCandidates.get(0); + } + + /** selects the best active master to join, where multiple are discovered */ + public DiscoveryNode tieBreakActiveMasters(Collection activeMasters) { + return activeMasters.stream().min(ElectMasterService::compareNodes).get(); } public boolean hasTooManyMasterNodes(Iterable nodes) { @@ -107,7 +180,7 @@ public class ElectMasterService extends AbstractComponent { */ public List sortByMasterLikelihood(Iterable nodes) { ArrayList sortedNodes = CollectionUtils.iterableAsArrayList(nodes); - CollectionUtil.introSort(sortedNodes, nodeComparator); + CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes); return sortedNodes; } @@ -130,25 +203,6 @@ public class ElectMasterService extends AbstractComponent { return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]); } - /** - * Elects a new master out of the possible nodes, returning it. Returns null - * if no master has been elected. - */ - public DiscoveryNode electMaster(Iterable nodes) { - List sortedNodes = sortedMasterNodes(nodes); - if (sortedNodes == null || sortedNodes.isEmpty()) { - return null; - } - DiscoveryNode masterNode = sortedNodes.get(0); - // Sanity check: maybe we don't end up here, because serialization may have failed. 
- if (masterNode.getVersion().before(minMasterVersion)) { - logger.warn("ignoring master [{}], because the version [{}] is lower than the minimum compatible version [{}]", masterNode, masterNode.getVersion(), minMasterVersion); - return null; - } else { - return masterNode; - } - } - private List sortedMasterNodes(Iterable nodes) { List possibleNodes = CollectionUtils.iterableAsArrayList(nodes); if (possibleNodes.isEmpty()) { @@ -161,21 +215,18 @@ public class ElectMasterService extends AbstractComponent { it.remove(); } } - CollectionUtil.introSort(possibleNodes, nodeComparator); + CollectionUtil.introSort(possibleNodes, ElectMasterService::compareNodes); return possibleNodes; } - private static class NodeComparator implements Comparator { - - @Override - public int compare(DiscoveryNode o1, DiscoveryNode o2) { - if (o1.isMasterNode() && !o2.isMasterNode()) { - return -1; - } - if (!o1.isMasterNode() && o2.isMasterNode()) { - return 1; - } - return o1.getId().compareTo(o2.getId()); + /** master nodes go before other nodes, with a secondary sort by id **/ + private static int compareNodes(DiscoveryNode o1, DiscoveryNode o2) { + if (o1.isMasterNode() && !o2.isMasterNode()) { + return -1; } + if (!o1.isMasterNode() && o2.isMasterNode()) { + return 1; + } + return o1.getId().compareTo(o2.getId()); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 6f0b8966d09..bf8559fb949 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import 
org.elasticsearch.discovery.zen.membership.MembershipAction; import java.util.ArrayList; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 132505fb403..43739a2f410 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -56,7 +55,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.fd.MasterFaultDetection; import org.elasticsearch.discovery.zen.fd.NodesFaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; @@ -76,13 +74,10 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -146,9 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final JoinThreadControl joinThreadControl; - /** counts the time this node has 
joined the cluster or have elected it self as master */ - private final AtomicLong clusterJoinsCounter = new AtomicLong(); - // must initialized in doStart(), when we have the allocationService set private volatile NodeJoinController nodeJoinController; private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; @@ -304,8 +296,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } @Override - public boolean nodeHasJoinedClusterOnce() { - return clusterJoinsCounter.get() > 0; + public ClusterState clusterState() { + return clusterService.state(); } /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ @@ -406,8 +398,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover joinThreadControl.markThreadAsDone(currentThread); // we only starts nodesFD if we are master (it may be that we received a cluster state while pinging) nodesFD.updateNodesAndPing(state); // start the nodes FD - long count = clusterJoinsCounter.incrementAndGet(); - logger.trace("cluster joins counter set to [{}] (elected as master)", count); } @Override @@ -764,9 +754,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) { // its a fresh update from the master as we transition from a start of not having a master to having one logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId()); - long count = clusterJoinsCounter.incrementAndGet(); - logger.trace("updated cluster join cluster to [{}]", count); - return newClusterState; } @@ -873,16 +860,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } else if (nodeJoinController == null) { throw new IllegalStateException("discovery module is not yet started"); } else { - // The minimum supported version for a node joining a master: - Version 
minimumNodeJoinVersion = localNode().getVersion().minimumCompatibilityVersion(); - // Sanity check: maybe we don't end up here, because serialization may have failed. - if (node.getVersion().before(minimumNodeJoinVersion)) { - callback.onFailure( - new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") - ); - return; - } - // try and connect to the node, if it fails, we can raise an exception back to the client... transportService.connectToNode(node); @@ -901,14 +878,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private DiscoveryNode findMaster() { logger.trace("starting to ping"); - ZenPing.PingResponse[] fullPingResponses = pingService.pingAndWait(pingTimeout); + List fullPingResponses = pingService.pingAndWait(pingTimeout).toList(); if (fullPingResponses == null) { logger.trace("No full ping responses"); return null; } if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); - if (fullPingResponses.length == 0) { + if (fullPingResponses.size() == 0) { sb.append(" {none}"); } else { for (ZenPing.PingResponse pingResponse : fullPingResponses) { @@ -918,69 +895,57 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover logger.trace("full ping responses:{}", sb); } + final DiscoveryNode localNode = clusterService.localNode(); + + // add our selves + assert fullPingResponses.stream().map(ZenPing.PingResponse::node) + .filter(n -> n.equals(localNode)).findAny().isPresent() == false; + + fullPingResponses.add(new ZenPing.PingResponse(localNode, null, clusterService.state())); + // filter responses final List pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger); - final DiscoveryNode localNode = clusterService.localNode(); - List pingMasters = new ArrayList<>(); + List activeMasters 
= new ArrayList<>(); for (ZenPing.PingResponse pingResponse : pingResponses) { - if (pingResponse.master() != null) { - // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without - // any check / verifications from other nodes in ZenDiscover#innerJoinCluster() - if (!localNode.equals(pingResponse.master())) { - pingMasters.add(pingResponse.master()); - } + // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without + // any check / verifications from other nodes in ZenDiscover#innerJoinCluster() + if (pingResponse.master() != null && !localNode.equals(pingResponse.master())) { + activeMasters.add(pingResponse.master()); } } // nodes discovered during pinging - Set activeNodes = new HashSet<>(); - // nodes discovered who has previously been part of the cluster and do not ping for the very first time - Set joinedOnceActiveNodes = new HashSet<>(); - if (localNode.isMasterNode()) { - activeNodes.add(localNode); - long joinsCounter = clusterJoinsCounter.get(); - if (joinsCounter > 0) { - logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter); - joinedOnceActiveNodes.add(localNode); - } - } + List masterCandidates = new ArrayList<>(); for (ZenPing.PingResponse pingResponse : pingResponses) { - activeNodes.add(pingResponse.node()); - if (pingResponse.hasJoinedOnce()) { - joinedOnceActiveNodes.add(pingResponse.node()); + if (pingResponse.node().isMasterNode()) { + masterCandidates.add(new ElectMasterService.MasterCandidate(pingResponse.node(), pingResponse.getClusterStateVersion())); } } - if (pingMasters.isEmpty()) { - if (electMaster.hasEnoughMasterNodes(activeNodes)) { - // we give preference to nodes who have previously already joined the cluster. 
Those will - // have a cluster state in memory, including an up to date routing table (which is not persistent to disk - // by the gateway) - DiscoveryNode master = electMaster.electMaster(joinedOnceActiveNodes); - if (master != null) { - return master; - } - return electMaster.electMaster(activeNodes); + if (activeMasters.isEmpty()) { + if (electMaster.hasEnoughCandidates(masterCandidates)) { + final ElectMasterService.MasterCandidate winner = electMaster.electMaster(masterCandidates); + logger.trace("candidate {} won election", winner); + return winner.getNode(); } else { // if we don't have enough master nodes, we bail, because there are not enough master to elect from - logger.trace("not enough master nodes [{}]", activeNodes); + logger.trace("not enough master nodes [{}]", masterCandidates); return null; } } else { - - assert !pingMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master"; + assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master"; // lets tie break between discovered nodes - return electMaster.electMaster(pingMasters); + return electMaster.tieBreakActiveMasters(activeMasters); } } - static List filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) { + static List filterPingResponses(List fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) { List pingResponses; if (masterElectionIgnoreNonMasters) { - pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList()); + pingResponses = fullPingResponses.stream().filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList()); } else { - pingResponses = Arrays.asList(fullPingResponses); + pingResponses = fullPingResponses; } if (logger.isDebugEnabled()) { diff --git 
a/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java index 568bc3ec16d..0bcc8b37d88 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen.ping; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; /** @@ -26,7 +27,7 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; */ public interface PingContextProvider extends DiscoveryNodesProvider { - /** return true if this node has previously joined the cluster at least once. False if this is first join */ - boolean nodeHasJoinedClusterOnce(); + /** return the current cluster state of the node */ + ClusterState clusterState(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java index 5a9f5f463e2..b4bb61ad461 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java @@ -20,30 +20,42 @@ package org.elasticsearch.discovery.zen.ping; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.zen.ElectMasterService; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; 
import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; + public interface ZenPing extends LifecycleComponent { void setPingContextProvider(PingContextProvider contextProvider); void ping(PingListener listener, TimeValue timeout); - public interface PingListener { + interface PingListener { - void onPing(PingResponse[] pings); + /** + * called when pinging is done. + * + * @param pings ping result *must + */ + void onPing(Collection pings); } - public static class PingResponse implements Streamable { + class PingResponse implements Streamable { public static final PingResponse[] EMPTY = new PingResponse[0]; @@ -59,29 +71,36 @@ public interface ZenPing extends LifecycleComponent { private DiscoveryNode master; - private boolean hasJoinedOnce; + private long clusterStateVersion; private PingResponse() { } /** - * @param node the node which this ping describes - * @param master the current master of the node - * @param clusterName the cluster name of the node - * @param hasJoinedOnce true if the joined has successfully joined the cluster before + * @param node the node which this ping describes + * @param master the current master of the node + * @param clusterName the cluster name of the node + * @param clusterStateVersion the current cluster state version of that node + * ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION} for not recovered) */ - public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, boolean hasJoinedOnce) { + public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, long clusterStateVersion) { this.id = idGenerator.incrementAndGet(); this.node = node; this.master = master; this.clusterName = clusterName; - this.hasJoinedOnce = hasJoinedOnce; + this.clusterStateVersion = clusterStateVersion; } - /** - * an always increasing unique identifier for this ping response. 
- * lower values means older pings. - */ + public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterState state) { + this(node, master, state.getClusterName(), + state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ? + ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION : state.version()); + } + + /** + * an always increasing unique identifier for this ping response. + * lower values means older pings. + */ public long id() { return this.id; } @@ -100,9 +119,11 @@ public interface ZenPing extends LifecycleComponent { return master; } - /** true if the joined has successfully joined the cluster before */ - public boolean hasJoinedOnce() { - return hasJoinedOnce; + /** + * the current cluster state version of that node ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION} + * for not recovered) */ + public long getClusterStateVersion() { + return clusterStateVersion; } public static PingResponse readPingResponse(StreamInput in) throws IOException { @@ -118,7 +139,7 @@ public interface ZenPing extends LifecycleComponent { if (in.readBoolean()) { master = new DiscoveryNode(in); } - this.hasJoinedOnce = in.readBoolean(); + this.clusterStateVersion = in.readLong(); this.id = in.readLong(); } @@ -132,13 +153,14 @@ public interface ZenPing extends LifecycleComponent { out.writeBoolean(true); master.writeTo(out); } - out.writeBoolean(hasJoinedOnce); + out.writeLong(clusterStateVersion); out.writeLong(id); } @Override public String toString() { - return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], hasJoinedOnce [" + hasJoinedOnce + "], cluster_name[" + clusterName.value() + "]}"; + return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], cluster_state_version [" + clusterStateVersion + + "], cluster_name[" + clusterName.value() + "]}"; } } @@ -146,7 +168,7 @@ public interface ZenPing extends LifecycleComponent { /** * a utility collection of pings where only the most 
recent ping is stored per node */ - public static class PingCollection { + class PingCollection { Map pings; @@ -171,15 +193,15 @@ public interface ZenPing extends LifecycleComponent { } /** adds multiple pings if newer than previous pings from the same node */ - public synchronized void addPings(PingResponse[] pings) { + public synchronized void addPings(Iterable pings) { for (PingResponse ping : pings) { addPing(ping); } } - /** serialize current pings to an array */ - public synchronized PingResponse[] toArray() { - return pings.values().toArray(new PingResponse[pings.size()]); + /** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */ + public synchronized List toList() { + return new ArrayList<>(pings.values()); } /** the number of nodes for which there are known pings */ diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java index bd5855666ac..3a2ddc10cfb 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java @@ -23,17 +23,15 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.AtomicBoolean; -public class ZenPingService extends AbstractLifecycleComponent implements ZenPing { +public class ZenPingService extends AbstractLifecycleComponent { private List zenPings 
= Collections.emptyList(); @@ -47,7 +45,6 @@ public class ZenPingService extends AbstractLifecycleComponent implements ZenPin return this.zenPings; } - @Override public void setPingContextProvider(PingContextProvider contextProvider) { if (lifecycle.started()) { throw new IllegalStateException("Can't set nodes provider when started"); @@ -78,60 +75,31 @@ public class ZenPingService extends AbstractLifecycleComponent implements ZenPin } } - public PingResponse[] pingAndWait(TimeValue timeout) { - final AtomicReference response = new AtomicReference<>(); - final CountDownLatch latch = new CountDownLatch(1); - ping(new PingListener() { - @Override - public void onPing(PingResponse[] pings) { - response.set(pings); - latch.countDown(); + public ZenPing.PingCollection pingAndWait(TimeValue timeout) { + final ZenPing.PingCollection response = new ZenPing.PingCollection(); + final CountDownLatch latch = new CountDownLatch(zenPings.size()); + for (ZenPing zenPing : zenPings) { + final AtomicBoolean counted = new AtomicBoolean(); + try { + zenPing.ping(pings -> { + response.addPings(pings); + if (counted.compareAndSet(false, true)) { + latch.countDown(); + } + }, timeout); + } catch (Exception ex) { + logger.warn("Ping execution failed", ex); + if (counted.compareAndSet(false, true)) { + latch.countDown(); + } } - }, timeout); + } try { latch.await(); - return response.get(); + return response; } catch (InterruptedException e) { logger.trace("pingAndWait interrupted"); - return null; - } - } - - @Override - public void ping(PingListener listener, TimeValue timeout) { - List zenPings = this.zenPings; - CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings); - for (ZenPing zenPing : zenPings) { - try { - zenPing.ping(compoundPingListener, timeout); - } catch (EsRejectedExecutionException ex) { - logger.debug("Ping execution rejected", ex); - compoundPingListener.onPing(null); - } - } - } - - private static class CompoundPingListener 
implements PingListener { - - private final PingListener listener; - - private final AtomicInteger counter; - - private PingCollection responses = new PingCollection(); - - private CompoundPingListener(PingListener listener, List zenPings) { - this.listener = listener; - this.counter = new AtomicInteger(zenPings.size()); - } - - @Override - public void onPing(PingResponse[] pings) { - if (pings != null) { - responses.addPings(pings); - } - if (counter.decrementAndGet() == 0) { - listener.onPing(responses.toArray()); - } + return response; } } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index afe4902f887..637730c75fd 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -44,7 +44,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ping.PingContextProvider; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.threadpool.ThreadPool; @@ -63,6 +63,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -236,8 +237,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin temporalResponses.clear(); } - public PingResponse[] pingAndWait(TimeValue duration) { - final AtomicReference response = new 
AtomicReference<>(); + // test only + Collection pingAndWait(TimeValue duration) { + final AtomicReference> response = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); ping(pings -> { response.set(pings); @@ -273,7 +275,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin protected void doRun() throws Exception { sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler); sendPingsHandler.close(); - listener.onPing(sendPingsHandler.pingCollection().toArray()); + listener.onPing(sendPingsHandler.pingCollection().toList()); for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) { logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node); transportService.disconnectFromNode(node); @@ -576,8 +578,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin } private PingResponse createPingResponse(DiscoveryNodes discoNodes) { - return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), clusterName, - contextProvider.nodeHasJoinedClusterOnce()); + return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), contextProvider.clusterState()); } static class UnicastPingResponse extends TransportResponse { diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index b30a3435479..87f86c3f596 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; 
+import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 07c1e5dd8da..2e86cb5b896 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index 98c7b1a3d67..31e841227b8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -30,8 +30,8 @@ import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationD import org.elasticsearch.common.Priority; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESIntegTestCase; import 
org.elasticsearch.test.ESIntegTestCase.ClusterScope; diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 057b54c7a07..3b436f45410 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -18,13 +18,10 @@ */ package org.elasticsearch.discovery; -import org.elasticsearch.Version; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.local.LocalDiscovery; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; -import org.elasticsearch.node.Node; import org.elasticsearch.test.NoopDiscovery; /** diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index f04db89796c..b78b1d923b9 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -49,8 +49,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.discovery.zen.ping.ZenPing; @@ -110,9 +110,12 @@ import java.util.concurrent.atomic.AtomicInteger; import 
java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -164,7 +167,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { private List startCluster(int numberOfNodes, int minimumMasterNode, @Nullable int[] unicastHostsOrdinals) throws ExecutionException, InterruptedException { - configureUnicastCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode); + configureCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode); List nodes = internalCluster().startNodesAsync(numberOfNodes).get(); ensureStableCluster(numberOfNodes); @@ -196,15 +199,15 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { return Arrays.asList(MockTransportService.TestPlugin.class); } - private void configureUnicastCluster( + private void configureCluster( int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode ) throws ExecutionException, InterruptedException { - configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode); + configureCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode); } - private void configureUnicastCluster( + private void configureCluster( Settings settings, int numberOfNodes, @Nullable int[] unicastHostsOrdinals, @@ -1031,7 +1034,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } public void 
testClusterFormingWithASlowNode() throws Exception { - configureUnicastCluster(3, null, 2); + configureCluster(3, null, 2); SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(random(), 0, 0, 1000, 2000); @@ -1094,7 +1097,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { */ public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception { // don't use DEFAULT settings (which can cause node disconnects on a slow CI machine) - configureUnicastCluster(Settings.EMPTY, 3, null, 1); + configureCluster(Settings.EMPTY, 3, null, 1); InternalTestCluster.Async masterNodeFuture = internalCluster().startMasterOnlyNodeAsync(); InternalTestCluster.Async node_1Future = internalCluster().startDataOnlyNodeAsync(); @@ -1135,7 +1138,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { // test for https://github.com/elastic/elasticsearch/issues/8823 - configureUnicastCluster(2, null, 1); + configureCluster(2, null, 1); String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY); @@ -1166,7 +1169,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed .build(); final String idxName = "test"; - configureUnicastCluster(settings, 3, null, 2); + configureCluster(settings, 3, null, 2); InternalTestCluster.Async> masterNodes = internalCluster().startMasterOnlyNodesAsync(2); InternalTestCluster.Async dataNode = internalCluster().startDataOnlyNodeAsync(); dataNode.get(); @@ -1195,6 +1198,61 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { assertFalse(client().admin().indices().prepareExists(idxName).get().isExists()); } + public void testElectMasterWithLatestVersion() throws Exception { + 
configureCluster(3, null, 2); + final Set nodes = new HashSet<>(internalCluster().startNodesAsync(3).get()); + ensureStableCluster(3); + ServiceDisruptionScheme isolateAllNodes = new NetworkDisruption(new NetworkDisruption.IsolateAllNodes(nodes), new NetworkDisconnect()); + internalCluster().setDisruptionScheme(isolateAllNodes); + + logger.info("--> forcing a complete election to make sure \"preferred\" master is elected"); + isolateAllNodes.startDisrupting(); + for (String node: nodes) { + assertNoMaster(node); + } + isolateAllNodes.stopDisrupting(); + ensureStableCluster(3); + final String preferredMasterName = internalCluster().getMasterName(); + final DiscoveryNode preferredMaster = internalCluster().clusterService(preferredMasterName).localNode(); + for (String node: nodes) { + DiscoveryNode discoveryNode = internalCluster().clusterService(node).localNode(); + assertThat(discoveryNode.getId(), greaterThanOrEqualTo(preferredMaster.getId())); + } + + logger.info("--> preferred master is {}", preferredMaster); + final Set nonPreferredNodes = new HashSet<>(nodes); + nonPreferredNodes.remove(preferredMasterName); + final ServiceDisruptionScheme isolatePreferredMaster = + new NetworkDisruption( + new NetworkDisruption.TwoPartitions( + Collections.singleton(preferredMasterName), nonPreferredNodes), + new NetworkDisconnect()); + internalCluster().setDisruptionScheme(isolatePreferredMaster); + isolatePreferredMaster.startDisrupting(); + + assertAcked(client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings( + INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1, + INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0 + )); + + internalCluster().clearDisruptionScheme(false); + internalCluster().setDisruptionScheme(isolateAllNodes); + + logger.info("--> forcing a complete election again"); + isolateAllNodes.startDisrupting(); + for (String node: nodes) { + assertNoMaster(node); + } + + isolateAllNodes.stopDisrupting(); + + final ClusterState state = 
client().admin().cluster().prepareState().get().getState(); + if (state.metaData().hasIndex("test") == false) { + fail("index 'test' was lost. current cluster state: " + state.prettyPrint()); + } + + } + protected NetworkDisruption addRandomDisruptionType(TwoPartitions partitions) { final NetworkLinkDisruptionType disruptionType; if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index b31b0cbaa55..737607df6be 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ElectMasterService.MasterCandidate; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -31,6 +31,10 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class ElectMasterServiceTests extends ESTestCase { @@ -55,6 +59,22 @@ public class ElectMasterServiceTests extends ESTestCase { return nodes; } + List generateRandomCandidates() { + int count = scaledRandomIntBetween(1, 100); + ArrayList candidates = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + Set roles = new HashSet<>(); + roles.add(DiscoveryNode.Role.MASTER); + DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), + roles, Version.CURRENT); + candidates.add(new 
MasterCandidate(node, randomBoolean() ? MasterCandidate.UNRECOVERED_CLUSTER_VERSION : randomPositiveLong())); + } + + Collections.shuffle(candidates, random()); + return candidates; + } + + public void testSortByMasterLikelihood() { List nodes = generateRandomNodes(); List sortedNodes = electMasterService().sortByMasterLikelihood(nodes); @@ -69,36 +89,53 @@ public class ElectMasterServiceTests extends ESTestCase { } prevNode = node; } + } + public void testTieBreakActiveMasters() { + List nodes = generateRandomCandidates().stream().map(MasterCandidate::getNode).collect(Collectors.toList()); + DiscoveryNode bestMaster = electMasterService().tieBreakActiveMasters(nodes); + for (DiscoveryNode node: nodes) { + if (node.equals(bestMaster) == false) { + assertTrue(bestMaster.getId().compareTo(node.getId()) < 0); + } + } + } + + public void testHasEnoughNodes() { + List nodes = rarely() ? Collections.emptyList() : generateRandomNodes(); + ElectMasterService service = electMasterService(); + int masterNodes = (int) nodes.stream().filter(DiscoveryNode::isMasterNode).count(); + service.minimumMasterNodes(randomIntBetween(-1, masterNodes)); + assertThat(service.hasEnoughMasterNodes(nodes), equalTo(masterNodes > 0)); + service.minimumMasterNodes(masterNodes + 1 + randomIntBetween(0, nodes.size())); + assertFalse(service.hasEnoughMasterNodes(nodes)); + } + + public void testHasEnoughCandidates() { + List candidates = rarely() ? 
Collections.emptyList() : generateRandomCandidates(); + ElectMasterService service = electMasterService(); + service.minimumMasterNodes(randomIntBetween(-1, candidates.size())); + assertThat(service.hasEnoughCandidates(candidates), equalTo(candidates.size() > 0)); + service.minimumMasterNodes(candidates.size() + 1 + randomIntBetween(0, candidates.size())); + assertFalse(service.hasEnoughCandidates(candidates)); } public void testElectMaster() { - List nodes = generateRandomNodes(); + List candidates = generateRandomCandidates(); ElectMasterService service = electMasterService(); - int min_master_nodes = randomIntBetween(0, nodes.size()); - service.minimumMasterNodes(min_master_nodes); - - int master_nodes = 0; - for (DiscoveryNode node : nodes) { - if (node.isMasterNode()) { - master_nodes++; - } - } - DiscoveryNode master = null; - if (service.hasEnoughMasterNodes(nodes)) { - master = service.electMaster(nodes); - } - - if (master_nodes == 0) { - assertNull(master); - } else if (min_master_nodes > 0 && master_nodes < min_master_nodes) { - assertNull(master); - } else { - assertNotNull(master); - for (DiscoveryNode node : nodes) { - if (node.isMasterNode()) { - assertTrue(master.getId().compareTo(node.getId()) <= 0); - } + int minMasterNodes = randomIntBetween(0, candidates.size()); + service.minimumMasterNodes(minMasterNodes); + MasterCandidate master = service.electMaster(candidates); + assertNotNull(master); + for (MasterCandidate candidate : candidates) { + if (candidate.getNode().equals(master.getNode())) { + // nothing much to test here + } else if (candidate.getClusterStateVersion() == master.getClusterStateVersion()) { + assertThat("candidate " + candidate + " has a lower or equal id than master " + master, candidate.getNode().getId(), + greaterThan(master.getNode().getId())); + } else { + assertThat("candidate " + master + " has a higher cluster state version than candidate " + candidate, + master.getClusterStateVersion(), 
greaterThan(candidate.getClusterStateVersion())); } } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 0acba3c420f..ca75ea960ad 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java index 35335a8ede4..1e8954330cd 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index b7aaf279582..d9a8c9be7f4 100644 --- 
a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -34,14 +34,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; @@ -60,10 +58,8 @@ import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; -import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -77,8 +73,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) @ESIntegTestCase.SuppressLocalMode @@ -293,44 +287,6 @@ public class ZenDiscoveryIT extends ESIntegTestCase { } } - public void testHandleNodeJoin_incompatibleMinVersion() throws UnknownHostException { - Settings nodeSettings = 
Settings.builder() - .put("discovery.type", "zen") // <-- To override the local setting if set externally - .build(); - String nodeName = internalCluster().startNode(nodeSettings); - ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); - DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), - emptyMap(), emptySet(), previousMajorVersion); - final AtomicReference holder = new AtomicReference<>(); - zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() { - @Override - public void onSuccess() { - } - - @Override - public void onFailure(Exception e) { - holder.set((IllegalStateException) e); - } - }); - - assertThat(holder.get(), notNullValue()); - assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [" + previousMajorVersion - + "] that is lower than the minimum compatible version [" + Version.CURRENT.minimumCompatibilityVersion() + "]")); - } - - public void testJoinElectedMaster_incompatibleMinVersion() { - ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY); - - DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(), - Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT); - assertThat(electMasterService.electMaster(Collections.singletonList(node)), sameInstance(node)); - node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(), emptySet(), previousMajorVersion); - assertThat("Can't join master because version " + previousMajorVersion - + " is lower than the minimum compatable version " + Version.CURRENT + " can support", - electMasterService.electMaster(Collections.singletonList(node)), nullValue()); - } - public void testDiscoveryStats() throws IOException { 
String expectedStatsJsonResponse = "{\n" + " \"discovery\" : {\n" + diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index b9f65016048..a7291dc3736 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener; @@ -55,8 +54,8 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState; -import static org.elasticsearch.discovery.zen.elect.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; @@ -128,7 +127,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { Set roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values()))); DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles, Version.CURRENT); - responses.add(new ZenPing.PingResponse(node, randomBoolean() ? 
null : node, new ClusterName("test"), randomBoolean())); + responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomLong())); allNodes.add(node); if (node.isMasterNode()) { masterNodes.add(node); @@ -136,8 +135,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { } boolean ignore = randomBoolean(); - List filtered = ZenDiscovery.filterPingResponses( - responses.toArray(new ZenPing.PingResponse[responses.size()]), ignore, logger); + List filtered = ZenDiscovery.filterPingResponses(responses, ignore, logger); final List filteredNodes = filtered.stream().map(ZenPing.PingResponse::node).collect(Collectors.toList()); if (ignore) { assertThat(filteredNodes, equalTo(masterNodes)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java index 72674f44e3d..2275756e8ee 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Collections; +import java.util.List; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -39,7 +40,7 @@ public class ZenPingTests extends ESTestCase { DiscoveryNode[] nodes = new DiscoveryNode[randomIntBetween(1, 30)]; long maxIdPerNode[] = new long[nodes.length]; DiscoveryNode masterPerNode[] = new DiscoveryNode[nodes.length]; - boolean hasJoinedOncePerNode[] = new boolean[nodes.length]; + long clusterStateVersionPerNode[] = new long[nodes.length]; ArrayList pings = new ArrayList<>(); for (int i = 0; i < nodes.length; i++) { nodes[i] = new DiscoveryNode("" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); @@ -51,9 +52,9 @@ public class ZenPingTests extends ESTestCase { if (randomBoolean()) { masterNode = 
nodes[randomInt(nodes.length - 1)]; } - boolean hasJoinedOnce = randomBoolean(); + long clusterStateVersion = randomLong(); ZenPing.PingResponse ping = new ZenPing.PingResponse(nodes[node], masterNode, ClusterName.CLUSTER_NAME_SETTING. - getDefault(Settings.EMPTY), hasJoinedOnce); + getDefault(Settings.EMPTY), clusterStateVersion); if (rarely()) { // ignore some pings continue; @@ -61,7 +62,7 @@ public class ZenPingTests extends ESTestCase { // update max ping info maxIdPerNode[node] = ping.id(); masterPerNode[node] = masterNode; - hasJoinedOncePerNode[node] = hasJoinedOnce; + clusterStateVersionPerNode[node] = clusterStateVersion; pings.add(ping); } @@ -69,15 +70,15 @@ public class ZenPingTests extends ESTestCase { Collections.shuffle(pings, random()); ZenPing.PingCollection collection = new ZenPing.PingCollection(); - collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()])); + collection.addPings(pings); - ZenPing.PingResponse[] aggregate = collection.toArray(); + List aggregate = collection.toList(); for (ZenPing.PingResponse ping : aggregate) { int nodeId = Integer.parseInt(ping.node().getId()); assertThat(maxIdPerNode[nodeId], equalTo(ping.id())); assertThat(masterPerNode[nodeId], equalTo(ping.master())); - assertThat(hasJoinedOncePerNode[nodeId], equalTo(ping.hasJoinedOnce())); + assertThat(clusterStateVersionPerNode[nodeId], equalTo(ping.getClusterStateVersion())); maxIdPerNode[nodeId] = -1; // mark as seen } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java similarity index 85% rename from core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java rename to core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java index ea5779c33bb..e04b0b52d81 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ 
b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java @@ -20,6 +20,9 @@ package org.elasticsearch.discovery.zen.ping.unicast; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -31,7 +34,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ping.PingContextProvider; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -45,16 +48,18 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import java.net.InetSocketAddress; +import java.util.Collection; import java.util.Collections; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class UnicastZenPingIT extends ESTestCase { +public class UnicastZenPingTests extends ESTestCase { public void testSimplePings() throws InterruptedException { int startPort = 11000 + randomIntBetween(0, 1000); int endPort = startPort + 10; @@ -78,6 +83,8 @@ public class UnicastZenPingIT extends ESTestCase { Version versionD = 
VersionUtils.randomVersionBetween(random(), previousVersion.minimumCompatibilityVersion(), previousVersion); NetworkHandle handleD = startServices(settingsMismatch, threadPool, networkService, "UZP_D", versionD); + final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomPositiveLong()).build(); + Settings hostsSettings = Settings.builder() .putArray("discovery.zen.ping.unicast.hosts", NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())), @@ -96,8 +103,8 @@ public class UnicastZenPingIT extends ESTestCase { } @Override - public boolean nodeHasJoinedClusterOnce() { - return false; + public ClusterState clusterState() { + return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build(); } }); zenPingA.start(); @@ -110,8 +117,8 @@ public class UnicastZenPingIT extends ESTestCase { } @Override - public boolean nodeHasJoinedClusterOnce() { - return true; + public ClusterState clusterState() { + return state; } }); zenPingB.start(); @@ -130,8 +137,8 @@ public class UnicastZenPingIT extends ESTestCase { } @Override - public boolean nodeHasJoinedClusterOnce() { - return false; + public ClusterState clusterState() { + return state; } }); zenPingC.start(); @@ -144,36 +151,38 @@ public class UnicastZenPingIT extends ESTestCase { } @Override - public boolean nodeHasJoinedClusterOnce() { - return false; + public ClusterState clusterState() { + return state; } }); zenPingD.start(); try { logger.info("ping from UZP_A"); - ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1)); - assertThat(pingResponses.length, equalTo(1)); - assertThat(pingResponses[0].node().getId(), equalTo("UZP_B")); - assertTrue(pingResponses[0].hasJoinedOnce()); + Collection pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1)); + assertThat(pingResponses.size(), equalTo(1)); + ZenPing.PingResponse ping = 
pingResponses.iterator().next(); + assertThat(ping.node().getId(), equalTo("UZP_B")); + assertThat(ping.getClusterStateVersion(), equalTo(state.version())); assertCounters(handleA, handleA, handleB, handleC, handleD); // ping again, this time from B, logger.info("ping from UZP_B"); pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1)); - assertThat(pingResponses.length, equalTo(1)); - assertThat(pingResponses[0].node().getId(), equalTo("UZP_A")); - assertFalse(pingResponses[0].hasJoinedOnce()); + assertThat(pingResponses.size(), equalTo(1)); + ping = pingResponses.iterator().next(); + assertThat(ping.node().getId(), equalTo("UZP_A")); + assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION)); assertCounters(handleB, handleA, handleB, handleC, handleD); logger.info("ping from UZP_C"); pingResponses = zenPingC.pingAndWait(TimeValue.timeValueSeconds(1)); - assertThat(pingResponses.length, equalTo(0)); + assertThat(pingResponses.size(), equalTo(0)); assertCounters(handleC, handleA, handleB, handleC, handleD); logger.info("ping from UZP_D"); pingResponses = zenPingD.pingAndWait(TimeValue.timeValueSeconds(1)); - assertThat(pingResponses.length, equalTo(0)); + assertThat(pingResponses.size(), equalTo(0)); assertCounters(handleD, handleA, handleB, handleC, handleD); } finally { zenPingA.close(); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 9b340fd863a..a998b56f640 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; -import 
org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.MapperParsingException; diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index a1d16bfd884..c820bccae51 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index f512f1da538..3a045c80ac8 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -44,8 +44,8 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.store.IndexStore; import 
org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.ttl.IndicesTTLService; diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 258b880f88f..bb2f384bc17 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -63,6 +63,22 @@ to create new scenarios. We have currently ported all published Jepsen scenarios framework. As the Jepsen tests evolve, we will continue porting new scenarios that are not covered yet. We are committed to investigating all new scenarios and will report issues that we find on this page and in our GitHub repository. +[float] +=== Repeated network partitions can cause cluster state updates to be lost (STATUS: ONGOING) + +During a networking partition, cluster state updates (like mapping changes or shard assignments) +are committed if a majority of the master-eligible nodes received the update correctly. This means that the current master has access +to enough nodes in the cluster to continue to operate correctly. When the network partition heals, the isolated nodes catch +up with the current state and receive the previously missed changes. However, if a second partition happens while the cluster +is still recovering from the previous one *and* the old master falls on the minority side, it may be that a new master is elected +which has not yet caught up. If that happens, cluster state updates can be lost. + +This problem is mostly fixed by {GIT}20384[#20384] (v5.0.0), which takes committed cluster state updates into account during master +election. This considerably reduces the chance of this rare problem occurring but does not fully mitigate it. If the second partition +happens concurrently with a cluster state update and blocks the cluster state commit message from reaching a majority of nodes, it may be +that the in flight update will be lost. 
If the now-isolated master can still acknowledge the cluster state update to the client this +will amount to the loss of an acknowledged change. Fixing that last scenario needs considerable work and is currently targeted at (v6.0.0). + [float] === Better request retry mechanism when nodes are disconnected (STATUS: ONGOING) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 833c27f9c55..59669ba8478 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -100,7 +100,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; @@ -368,14 +368,14 @@ public abstract class ESIntegTestCase extends ESTestCase { // TODO move settings for random directory etc here into the index based randomized settings. 
if (cluster().size() > 0) { Settings.Builder randomSettingsBuilder = - setRandomIndexSettings(random(), Settings.builder()); + setRandomIndexSettings(random(), Settings.builder()); if (isInternalCluster()) { // this is only used by mock plugins and if the cluster is not internal we just can't set it randomSettingsBuilder.put(INDEX_TEST_SEED_SETTING.getKey(), random().nextLong()); } randomSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards()) - .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas()); + .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas()); // if the test class is annotated with SuppressCodecs("*"), it means don't use lucene's codec randomization // otherwise, use it, it has assertions and so on that can find bugs. @@ -404,10 +404,10 @@ public abstract class ESIntegTestCase extends ESTestCase { randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean()); } PutIndexTemplateRequestBuilder putTemplate = client().admin().indices() - .preparePutTemplate("random_index_template") - .setTemplate("*") - .setOrder(0) - .setSettings(randomSettingsBuilder); + .preparePutTemplate("random_index_template") + .setTemplate("*") + .setOrder(0) + .setSettings(randomSettingsBuilder); if (mappings != null) { logger.info("test using _default_ mappings: [{}]", mappings.bytes().utf8ToString()); putTemplate.addMapping("_default_", mappings); @@ -443,7 +443,7 @@ public abstract class ESIntegTestCase extends ESTestCase { private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), - random.nextBoolean() ? random.nextDouble() : random.nextBoolean()); + random.nextBoolean() ? 
random.nextDouble() : random.nextBoolean()); } switch (random.nextInt(4)) { case 3: @@ -525,9 +525,9 @@ public abstract class ESIntegTestCase extends ESTestCase { if (currentClusterScope != Scope.TEST) { MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData(); assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData - .persistentSettings().getAsMap().size(), equalTo(0)); + .persistentSettings().getAsMap().size(), equalTo(0)); assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData - .transientSettings().getAsMap().size(), equalTo(0)); + .transientSettings().getAsMap().size(), equalTo(0)); } ensureClusterSizeConsistency(); ensureClusterStateConsistency(); @@ -540,7 +540,7 @@ public abstract class ESIntegTestCase extends ESTestCase { @Override public void run() { assertThat("still having pending states: " + Strings.arrayToDelimitedString(zenDiscovery.pendingClusterStates(), "\n"), - zenDiscovery.pendingClusterStates(), emptyArray()); + zenDiscovery.pendingClusterStates(), emptyArray()); } }); } @@ -829,7 +829,7 @@ public abstract class ESIntegTestCase extends ESTestCase { String failMsg = sb.toString(); for (SearchHit hit : searchResponse.getHits().getHits()) { sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType()) - .append("] id [").append(hit.id()).append("]"); + .append("] id [").append(hit.id()).append("]"); } logger.warn("{}", sb); fail(failMsg); @@ -873,7 +873,7 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... 
indices) { ClusterHealthResponse actionGet = client().admin().cluster() - .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet(); + .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet(); if (actionGet.isTimedOut()) { logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); fail("timed out waiting for green state"); @@ -900,7 +900,7 @@ public abstract class ESIntegTestCase extends ESTestCase { request.waitForStatus(status); } ClusterHealthResponse actionGet = client().admin().cluster() - .health(request).actionGet(); + .health(request).actionGet(); if (actionGet.isTimedOut()) { logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); @@ -945,7 +945,7 @@ public abstract class ESIntegTestCase extends ESTestCase { * @return the actual number of docs seen. 
*/ public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit, @Nullable final BackgroundIndexer indexer) - throws InterruptedException { + throws InterruptedException { final AtomicLong lastKnownCount = new AtomicLong(-1); long lastStartCount = -1; BooleanSupplier testDocs = () -> { @@ -988,8 +988,8 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n)) - .get().isAcknowledged()); + Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n)) + .get().isAcknowledged()); } /** @@ -997,7 +997,7 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public ClusterHealthStatus ensureYellow(String... indices) { ClusterHealthResponse actionGet = client().admin().cluster() - .health(Requests.clusterHealthRequest(indices).waitForNoRelocatingShards(true).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet(); + .health(Requests.clusterHealthRequest(indices).waitForNoRelocatingShards(true).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet(); if (actionGet.isTimedOut()) { logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false)); @@ -1019,7 +1019,7 @@ public abstract class ESIntegTestCase extends ESTestCase { public void logSegmentsState(String... indices) throws Exception { IndicesSegmentResponse segsRsp = client().admin().indices().prepareSegments(indices).get(); logger.debug("segments {} state: \n{}", indices.length == 0 ? 
"[_all]" : indices, - segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string()); + segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string()); } /** @@ -1102,16 +1102,16 @@ public abstract class ESIntegTestCase extends ESTestCase { } logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue); ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForNodes(Integer.toString(nodeCount)) - .setTimeout(timeValue) - .setLocal(local) - .setWaitForNoRelocatingShards(true) - .get(); + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCount)) + .setTimeout(timeValue) + .setLocal(local) + .setWaitForNoRelocatingShards(true) + .get(); if (clusterHealthResponse.isTimedOut()) { ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get(); fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n" - + stateResponse.getState().prettyPrint()); + + stateResponse.getState().prettyPrint()); } assertThat(clusterHealthResponse.isTimedOut(), is(false)); } @@ -1234,7 +1234,7 @@ public abstract class ESIntegTestCase extends ESTestCase { */ protected final void enableAllocation(String... indices) { client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put( - EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all" + EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all" )).get(); } @@ -1243,7 +1243,7 @@ public abstract class ESIntegTestCase extends ESTestCase { */ protected final void disableAllocation(String... 
indices) { client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put( - EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none" + EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none" )).get(); } @@ -1357,7 +1357,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } } else { List> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE, - Math.max(1, (int) (builders.size() * randomDouble())))); + Math.max(1, (int) (builders.size() * randomDouble())))); logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size()); for (List segmented : partition) { BulkRequestBuilder bulkBuilder = client().prepareBulk(); @@ -1426,18 +1426,18 @@ public abstract class ESIntegTestCase extends ESTestCase { if (rarely()) { if (rarely()) { client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (maybeFlush && rarely()) { if (randomBoolean()) { client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else { client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()), new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } } else if (rarely()) { client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute( - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } 
} while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) { @@ -1567,7 +1567,7 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public void clearScroll(String... scrollIds) { ClearScrollResponse clearResponse = client().prepareClearScroll() - .setScrollIds(Arrays.asList(scrollIds)).get(); + .setScrollIds(Arrays.asList(scrollIds)).get(); assertThat(clearResponse.isSucceeded(), equalTo(true)); } @@ -1631,20 +1631,20 @@ public abstract class ESIntegTestCase extends ESTestCase { */ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE) - // Default the watermarks to absurdly low to prevent the tests - // from failing on nodes without enough disk space - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") - .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) - .put("script.stored", "true") - .put("script.inline", "true") - // by default we never cache below 10k docs in a segment, - // bypass this limit so that caching gets some testing in - // integration tests that usually create few documents - .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0) - // wait short time for other active shards before actually deleting, default 30s not needed in tests - .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE) + // Default the watermarks to absurdly low to prevent the tests + // from failing on nodes without enough disk space + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + 
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) + .put("script.stored", "true") + .put("script.inline", "true") + // by default we never cache below 10k docs in a segment, + // bypass this limit so that caching gets some testing in + // integration tests that usually create few documents + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0) + // wait short time for other active shards before actually deleting, default 30s not needed in tests + .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); return builder.build(); } @@ -1739,8 +1739,8 @@ public abstract class ESIntegTestCase extends ESTestCase { mockPlugins = mocks; } return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, minNumDataNodes, maxNumDataNodes, - InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), - InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper()); + InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), + InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper()); } protected NodeConfigurationSource getNodeConfigSource() { @@ -1772,7 +1772,7 @@ public abstract class ESIntegTestCase extends ESTestCase { .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), isNetwork ? DiscoveryModule.DISCOVERY_TYPE_SETTING.getDefault(Settings.EMPTY) : "local") .put(networkSettings.build()). 
- put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); + put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } @Override @@ -2071,8 +2071,8 @@ public abstract class ESIntegTestCase extends ESTestCase { assertFalse(Files.exists(src)); assertTrue(Files.exists(dest)); Settings.Builder builder = Settings.builder() - .put(settings) - .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); + .put(settings) + .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); Path configDir = indexDir.resolve("config"); if (Files.exists(configDir)) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index e49e6d4aa40..6a5493ff1eb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1660,10 +1660,18 @@ public final class InternalTestCluster extends TestCluster { } public void clearDisruptionScheme() { + clearDisruptionScheme(true); + } + + public void clearDisruptionScheme(boolean ensureHealthyCluster) { if (activeDisruptionScheme != null) { TimeValue expectedHealingTime = activeDisruptionScheme.expectedTimeToHeal(); logger.info("Clearing active scheme {}, expected healing time {}", activeDisruptionScheme, expectedHealingTime); - activeDisruptionScheme.removeAndEnsureHealthy(this); + if (ensureHealthyCluster) { + activeDisruptionScheme.removeAndEnsureHealthy(this); + } else { + activeDisruptionScheme.removeFromCluster(this); + } } activeDisruptionScheme = null; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java index 4e135c4c2b0..f7094d8ae9f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java @@ -328,6 +328,18 @@ public class NetworkDisruption implements ServiceDisruptionScheme { } } + public static class IsolateAllNodes extends DisruptedLinks { + + public IsolateAllNodes(Set nodes) { + super(nodes); + } + + @Override + public boolean disrupt(String node1, String node2) { + return true; + } + } + /** * Abstract class representing various types of network disruptions. Instances of this class override the {@link #applyDisruption} * method to apply their specific disruption type to requests that are send from a source to a target node. diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java index 4d0f1123a1b..edc261c1759 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruptionTests.java @@ -56,6 +56,21 @@ public class NetworkDisruptionTests extends ESTestCase { assertTrue(topology.getMajoritySide().size() >= topology.getMinoritySide().size()); } + public void testIsolateAll() { + Set nodes = generateRandomStringSet(1, 10); + NetworkDisruption.DisruptedLinks topology = new NetworkDisruption.IsolateAllNodes(nodes); + for (int i = 0; i < 10; i++) { + final String node1 = randomFrom(nodes); + final String node2 = randomFrom(nodes); + if (node1.equals(node2)) { + continue; + } + assertTrue(topology.nodes().contains(node1)); + assertTrue(topology.nodes().contains(node2)); + assertTrue(topology.disrupt(node1, node2)); + } + } + public void testBridge() { Set partition1 = generateRandomStringSet(1, 10); Set partition2 = generateRandomStringSet(1, 10); From f5daa165f12a9ef84006ba16d89c8baf1efe4b94 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 16 Sep 2016 09:47:53 +0200 Subject: [PATCH 12/25] 
Remove ability to plug-in TransportService (#20505) TransportService is such a central part of the core server, replacing it's implementation is risky and can cause serious issues. This change removes the ability to plug in TransportService but allows registering a TransportInterceptor that enables plugins to intercept requests on both the sender and the receiver ends. This is a commonly used and overwritten functionality but encapsulates the custom code in a contained manner. --- .../common/network/NetworkModule.java | 54 +++++-- .../common/settings/ClusterSettings.java | 1 - .../java/org/elasticsearch/node/Node.java | 6 +- .../transport/TransportInterceptor.java | 60 ++++++++ .../transport/TransportService.java | 69 ++++----- .../action/IndicesRequestIT.java | 68 ++++----- .../node/tasks/TaskManagerTestCase.java | 2 +- .../bulk/TransportBulkActionTookTests.java | 3 +- .../ingest/IngestProxyActionFilterTests.java | 117 +++++++++------ .../action/main/MainActionTests.java | 3 +- .../TransportMultiSearchActionTests.java | 8 +- .../TransportBroadcastByNodeActionTests.java | 3 +- .../TransportMasterNodeActionTests.java | 2 +- .../nodes/TransportNodesActionTests.java | 3 +- .../BroadcastReplicationTests.java | 3 +- .../TransportReplicationActionTests.java | 3 +- .../TransportWriteActionTests.java | 3 +- ...ortInstanceSingleOperationActionTests.java | 2 +- .../client/AbstractClientHeadersTestCase.java | 11 +- .../TransportClientHeadersTests.java | 137 ++++++++++-------- .../TransportClientNodesServiceTests.java | 36 ++--- .../cluster/NodeConnectionsServiceTests.java | 2 +- .../action/shard/ShardStateActionTests.java | 2 +- .../health/ClusterStateHealthTests.java | 3 +- .../common/network/NetworkModuleTests.java | 48 +++--- .../discovery/ZenFaultDetectionTests.java | 2 +- .../zen/ping/unicast/UnicastZenPingTests.java | 3 +- .../mapper/DynamicMappingDisabledTests.java | 3 +- .../indices/cluster/ClusterStateChanges.java | 3 +- ...ClusterStateServiceRandomUpdatesTests.java 
| 3 +- .../indices/store/IndicesStoreTests.java | 2 +- .../transport/TransportModuleTests.java | 51 ------- .../TransportServiceHandshakeTests.java | 3 +- .../Netty3SizeHeaderFrameDecoderTests.java | 3 +- .../netty3/Netty3ScheduledPingTests.java | 5 +- .../netty3/SimpleNetty3TransportTests.java | 3 +- .../netty4/Netty4ScheduledPingTests.java | 5 +- .../netty4/SimpleNetty4TransportTests.java | 3 +- .../FileBasedUnicastHostsProviderTests.java | 3 +- .../java/org/elasticsearch/node/MockNode.java | 21 +++ .../org/elasticsearch/test/ExternalNode.java | 1 - .../test/transport/MockTransportService.java | 18 +-- .../transport/MockTransportClient.java | 1 - .../transport/MockTcpTransportTests.java | 3 +- 44 files changed, 448 insertions(+), 337 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java delete mode 100644 core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index bb4a4bd3b30..2ba236fb417 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -42,11 +42,15 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.tasks.RawTaskStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; import java.util.ArrayList; import java.util.List; +import java.util.Objects; /** * A module to handle registering and binding all network related classes. 
@@ -54,7 +58,6 @@ import java.util.List; public class NetworkModule extends AbstractModule { public static final String TRANSPORT_TYPE_KEY = "transport.type"; - public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; public static final String HTTP_TYPE_KEY = "http.type"; public static final String LOCAL_TRANSPORT = "local"; public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default"; @@ -65,8 +68,6 @@ public class NetworkModule extends AbstractModule { public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope); public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); - public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = - Setting.simpleString(TRANSPORT_SERVICE_TYPE_KEY, Property.NodeScope); public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope); private final NetworkService networkService; @@ -74,10 +75,10 @@ public class NetworkModule extends AbstractModule { private final boolean transportClient; private final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry(); - private final ExtensionPoint.SelectedType transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class); private final ExtensionPoint.SelectedType transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class); private final ExtensionPoint.SelectedType httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class); private final List namedWriteables = new ArrayList<>(); + private final List transportIntercetors = new ArrayList<>(); /** * Creates a network module that custom networking classes can be plugged into. 
@@ -89,7 +90,6 @@ public class NetworkModule extends AbstractModule { this.networkService = networkService; this.settings = settings; this.transportClient = transportClient; - registerTransportService("default", TransportService.class); registerTransport(LOCAL_TRANSPORT, LocalTransport.class); namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new)); @@ -100,11 +100,6 @@ public class NetworkModule extends AbstractModule { return transportClient; } - /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */ - public void registerTransportService(String name, Class clazz) { - transportServiceTypes.registerExtension(name, clazz); - } - /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ public void registerTransport(String name, Class clazz) { transportTypes.registerExtension(name, clazz); @@ -149,9 +144,9 @@ public class NetworkModule extends AbstractModule { @Override protected void configure() { bind(NetworkService.class).toInstance(networkService); - transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default"); + bindTransportService(); transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings)); - + bind(TransportInterceptor.class).toInstance(new CompositeTransportInterceptor(this.transportIntercetors)); if (transportClient == false) { if (HTTP_ENABLED.get(settings)) { bind(HttpServer.class).asEagerSingleton(); @@ -181,4 +176,39 @@ public class NetworkModule extends AbstractModule { public boolean canRegisterHttpExtensions() { return transportClient == false; } + + /** + * Registers a new {@link TransportInterceptor} + */ + public void addTransportInterceptor(TransportInterceptor interceptor) { + 
this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + } + + static final class CompositeTransportInterceptor implements TransportInterceptor { + final List transportInterceptors; + + private CompositeTransportInterceptor(List transportInterceptors) { + this.transportInterceptors = new ArrayList<>(transportInterceptors); + } + + @Override + public TransportRequestHandler interceptHandler(String action, TransportRequestHandler actualHandler) { + for (TransportInterceptor interceptor : this.transportInterceptors) { + actualHandler = interceptor.interceptHandler(action, actualHandler); + } + return actualHandler; + } + + @Override + public AsyncSender interceptSender(AsyncSender sender) { + for (TransportInterceptor interceptor : this.transportInterceptors) { + sender = interceptor.interceptSender(sender); + } + return sender; + } + } + + protected void bindTransportService() { + bind(TransportService.class).asEagerSingleton(); + } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 1256021b96e..c1841d11fbf 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -226,7 +226,6 @@ public final class ClusterSettings extends AbstractScopedSettings { NetworkModule.HTTP_DEFAULT_TYPE_SETTING, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING, NetworkModule.HTTP_TYPE_SETTING, - NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING, NetworkModule.TRANSPORT_TYPE_SETTING, HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, HttpTransportSettings.SETTING_CORS_ENABLED, diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 4368d51e346..9684b535485 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ 
b/core/src/main/java/org/elasticsearch/node/Node.java @@ -321,7 +321,7 @@ public class Node implements Closeable { } final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); modules.add(new NodeModule(this, monitorService)); - NetworkModule networkModule = new NetworkModule(networkService, settings, false); + NetworkModule networkModule = createNetworkModule(settings, networkService); modules.add(networkModule); modules.add(new DiscoveryModule(this.settings)); ClusterModule clusterModule = new ClusterModule(settings, clusterService, @@ -417,6 +417,10 @@ public class Node implements Closeable { } } + protected NetworkModule createNetworkModule(Settings settings, NetworkService networkService) { + return new NetworkModule(networkService, settings, false); + } + /** * The settings that were used to create the node. */ diff --git a/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java b/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java new file mode 100644 index 00000000000..d8072a81ba6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.cluster.node.DiscoveryNode; + +import java.util.function.Supplier; + +/** + * This interface allows plugins to intercept requests on both the sender and the receiver side. + */ +public interface TransportInterceptor { + /** + * This is called for each handler that is registered via + * {@link TransportService#registerRequestHandler(String, Supplier, String, boolean, boolean, TransportRequestHandler)} or + * {@link TransportService#registerRequestHandler(String, Supplier, String, TransportRequestHandler)}. The returned handler is + * used instead of the passed in handler. By default the provided handler is returned. + */ + default TransportRequestHandler interceptHandler(String action, + TransportRequestHandler actualHandler) { + return actualHandler; + } + + /** + * This is called up-front providing the actual low level {@link AsyncSender} that performs the low level send request. + * The returned sender is used to send all requests that come in via + * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportResponseHandler)} or + * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}. + * This allows plugins to perform actions on each send request including modifying the request context etc. 
+ */ + default AsyncSender interceptSender(AsyncSender sender) { + return sender; + } + + /** + * A simple interface to decorate + * {@link #sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)} + */ + interface AsyncSender { + void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request, + final TransportRequestOptions options, TransportResponseHandler handler); + } +} diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 20b8c77d44a..8c5886f7311 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -66,9 +66,6 @@ import java.util.function.Supplier; import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.listSetting; -/** - * - */ public class TransportService extends AbstractLifecycleComponent { public static final String DIRECT_RESPONSE_PROFILE = ".direct"; @@ -79,16 +76,19 @@ public class TransportService extends AbstractLifecycleComponent { protected final ThreadPool threadPool; protected final ClusterName clusterName; protected final TaskManager taskManager; + private final TransportInterceptor.AsyncSender asyncSender; volatile Map requestHandlers = Collections.emptyMap(); final Object requestHandlerMutex = new Object(); final ConcurrentMapLong clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - final AtomicLong requestIds = new AtomicLong(); + private final AtomicLong requestIds = new AtomicLong(); final CopyOnWriteArrayList connectionListeners = new CopyOnWriteArrayList<>(); + private final TransportInterceptor interceptor; + // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they // do show up, we can print more descriptive 
information about them final Map timeoutInfoHandlers = @@ -101,6 +101,8 @@ public class TransportService extends AbstractLifecycleComponent { private final TransportService.Adapter adapter; + public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() {}; + // tracer log public static final Setting> TRACE_LOG_INCLUDE_SETTING = @@ -118,7 +120,7 @@ public class TransportService extends AbstractLifecycleComponent { volatile DiscoveryNode localNode = null; @Inject - public TransportService(Settings settings, Transport transport, ThreadPool threadPool) { + public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor) { super(settings); this.transport = transport; this.threadPool = threadPool; @@ -128,6 +130,8 @@ public class TransportService extends AbstractLifecycleComponent { tracerLog = Loggers.getLogger(logger, ".tracer"); adapter = createAdapter(); taskManager = createTaskManager(); + this.interceptor = transportInterceptor; + this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); } /** @@ -241,11 +245,11 @@ public class TransportService extends AbstractLifecycleComponent { * when the transport layer starts up it will block any incoming requests until * this method is called */ - public void acceptIncomingRequests() { + public final void acceptIncomingRequests() { blockIncomingRequestsLatch.countDown(); } - public boolean addressSupported(Class address) { + public final boolean addressSupported(Class address) { return transport.addressSupported(address); } @@ -442,13 +446,23 @@ public class TransportService extends AbstractLifecycleComponent { return futureHandler; } - public void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request, - final TransportResponseHandler handler) { + public final void sendRequest(final DiscoveryNode node, final String action, + final TransportRequest request, + final 
TransportResponseHandler handler) { sendRequest(node, action, request, TransportRequestOptions.EMPTY, handler); } - public void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request, - final TransportRequestOptions options, TransportResponseHandler handler) { + public final void sendRequest(final DiscoveryNode node, final String action, + final TransportRequest request, + final TransportRequestOptions options, + TransportResponseHandler handler) { + asyncSender.sendRequest(node, action, request, options, handler); + } + + private void sendRequestInternal(final DiscoveryNode node, final String action, + final TransportRequest request, + final TransportRequestOptions options, + TransportResponseHandler handler) { if (node == null) { throw new IllegalStateException("can't send request to a null node"); } @@ -594,8 +608,9 @@ public class TransportService extends AbstractLifecycleComponent { * @param executor The executor the request handling will be executed on * @param handler The handler itself that implements the request handling */ - public void registerRequestHandler(String action, Supplier requestFactory, String executor, - TransportRequestHandler handler) { + public final void registerRequestHandler(String action, Supplier requestFactory, + String executor, TransportRequestHandler handler) { + handler = interceptor.interceptHandler(action, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, requestFactory, taskManager, handler, executor, false, true); registerRequestHandler(reg); @@ -611,10 +626,11 @@ public class TransportService extends AbstractLifecycleComponent { * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. 
* @param handler The handler itself that implements the request handling */ - public void registerRequestHandler(String action, Supplier request, + public final void registerRequestHandler(String action, Supplier request, String executor, boolean forceExecution, boolean canTripCircuitBreaker, TransportRequestHandler handler) { + handler = interceptor.interceptHandler(action, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, request, taskManager, handler, executor, forceExecution, canTripCircuitBreaker); registerRequestHandler(reg); @@ -744,12 +760,9 @@ public class TransportService extends AbstractLifecycleComponent { @Override public void raiseNodeConnected(final DiscoveryNode node) { - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - for (TransportConnectionListener connectionListener : connectionListeners) { - connectionListener.onNodeConnected(node); - } + threadPool.generic().execute(() -> { + for (TransportConnectionListener connectionListener : connectionListeners) { + connectionListener.onNodeConnected(node); } }); } @@ -758,12 +771,7 @@ public class TransportService extends AbstractLifecycleComponent { public void raiseNodeDisconnected(final DiscoveryNode node) { try { for (final TransportConnectionListener connectionListener : connectionListeners) { - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - connectionListener.onNodeDisconnected(node); - } - }); + threadPool.generic().execute(() -> connectionListener.onNodeDisconnected(node)); } for (Map.Entry entry : clientHandlers.entrySet()) { RequestHolder holder = entry.getValue(); @@ -772,12 +780,8 @@ public class TransportService extends AbstractLifecycleComponent { if (holderToNotify != null) { // callback that an exception happened, but on a different thread since we don't // want handlers to worry about stack overflows - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - 
holderToNotify.handler().handleException(new NodeDisconnectedException(node, holderToNotify.action())); - } - }); + threadPool.generic().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException(node, + holderToNotify.action()))); } } } @@ -1065,6 +1069,5 @@ public class TransportService extends AbstractLifecycleComponent { public String getChannelType() { return "direct"; } - } } diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 5b901536471..778a0801f52 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -78,11 +78,11 @@ import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -91,12 +91,10 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; -import 
org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; @@ -110,7 +108,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Supplier; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -143,8 +140,7 @@ public class IndicesRequestIT extends ESIntegTestCase { return Settings.builder().put(super.nodeSettings(ordinal)) // InternalClusterInfoService sends IndicesStatsRequest periodically which messes with this test // this setting disables it... - .put("cluster.routing.allocation.disk.threshold_enabled", false) - .put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); + .put("cluster.routing.allocation.disk.threshold_enabled", false).build(); } @Override @@ -701,31 +697,39 @@ public class IndicesRequestIT extends ESIntegTestCase { } private static void assertAllRequestsHaveBeenConsumed() { - Iterable transportServices = internalCluster().getInstances(TransportService.class); - for (TransportService transportService : transportServices) { - assertThat(((InterceptingTransportService)transportService).requests.entrySet(), emptyIterable()); + Iterable pluginsServices = internalCluster().getInstances(PluginsService.class); + for (PluginsService pluginsService : pluginsServices) { + Set>> entries = + pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get() + .instance.requests.entrySet(); + assertThat(entries, emptyIterable()); + } } private static void clearInterceptedActions() { - Iterable transportServices = internalCluster().getInstances(TransportService.class); - for (TransportService transportService : transportServices) { - ((InterceptingTransportService) transportService).clearInterceptedActions(); + Iterable pluginsServices = 
internalCluster().getInstances(PluginsService.class); + for (PluginsService pluginsService : pluginsServices) { + pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get() + .instance.clearInterceptedActions(); } } private static void interceptTransportActions(String... actions) { - Iterable transportServices = internalCluster().getInstances(TransportService.class); - for (TransportService transportService : transportServices) { - ((InterceptingTransportService) transportService).interceptTransportActions(actions); + Iterable pluginsServices = internalCluster().getInstances(PluginsService.class); + for (PluginsService pluginsService : pluginsServices) { + pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get() + .instance.interceptTransportActions(actions); } } private static List consumeTransportRequests(String action) { List requests = new ArrayList<>(); - Iterable transportServices = internalCluster().getInstances(TransportService.class); - for (TransportService transportService : transportServices) { - List transportRequests = ((InterceptingTransportService) transportService).consumeRequests(action); + + Iterable pluginsServices = internalCluster().getInstances(PluginsService.class); + for (PluginsService pluginsService : pluginsServices) { + List transportRequests = pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class) + .stream().findFirst().get().instance.consumeRequests(action); if (transportRequests != null) { requests.addAll(transportRequests); } @@ -733,12 +737,12 @@ public class IndicesRequestIT extends ESIntegTestCase { return requests; } - public static class InterceptingTransportService extends TransportService { + public static class InterceptingTransportService implements TransportInterceptor { public static class TestPlugin extends Plugin { - + public final InterceptingTransportService instance = new InterceptingTransportService(); 
public void onModule(NetworkModule module) { - module.registerTransportService("intercepting", InterceptingTransportService.class); + module.addTransportInterceptor(instance); } } @@ -746,9 +750,10 @@ public class IndicesRequestIT extends ESIntegTestCase { private final Map> requests = new HashMap<>(); - @Inject - public InterceptingTransportService(Settings settings, Transport transport, ThreadPool threadPool) { - super(settings, transport, threadPool); + @Override + public TransportRequestHandler interceptHandler(String action, + TransportRequestHandler actualHandler) { + return new InterceptingRequestHandler<>(action, actualHandler); } synchronized List consumeRequests(String action) { @@ -763,19 +768,6 @@ public class IndicesRequestIT extends ESIntegTestCase { actions.clear(); } - @Override - public void registerRequestHandler(String action, Supplier request, String executor, - boolean forceExecution, boolean canTripCircuitBreaker, - TransportRequestHandler handler) { - super.registerRequestHandler(action, request, executor, forceExecution, canTripCircuitBreaker, new - InterceptingRequestHandler<>(action, handler)); - } - - @Override - public void registerRequestHandler(String action, Supplier requestFactory, String - executor, TransportRequestHandler handler) { - super.registerRequestHandler(action, requestFactory, executor, new InterceptingRequestHandler<>(action, handler)); - } private class InterceptingRequestHandler implements TransportRequestHandler { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 8bb32c240f2..c457d3a30fa 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -169,7 +169,7 @@ public abstract class TaskManagerTestCase extends 
ESTestCase { clusterService = createClusterService(threadPool); transportService = new TransportService(settings, new LocalTransport(settings, threadPool, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()), threadPool) { + new NoneCircuitBreakerService()), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR) { @Override protected TaskManager createTaskManager() { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 7c39adc76f6..4a2f3da952d 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -87,7 +87,8 @@ public class TransportBulkActionTookTests extends ESTestCase { private TransportBulkAction createAction(boolean controlled, AtomicLong expected) { CapturingTransport capturingTransport = new CapturingTransport(); - TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool); + TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY); diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java index a602465197a..50bd3771bc3 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java @@ -33,29 +33,30 @@ import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import org.hamcrest.CustomTypeSafeMatcher; -import org.mockito.stubbing.Answer; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.CoreMatchers.equalTo; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; @@ -67,7 +68,7 @@ public class IngestProxyActionFilterTests extends ESTestCase { private TransportService transportService; @SuppressWarnings("unchecked") - private IngestProxyActionFilter buildFilter(int ingestNodes, int totalNodes) { + private IngestProxyActionFilter buildFilter(int ingestNodes, int totalNodes, TransportInterceptor interceptor) { ClusterState.Builder clusterState = new ClusterState.Builder(new ClusterName("_name")); DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(); DiscoveryNode localNode = null; @@ -88,7 +89,7 @@ 
public class IngestProxyActionFilterTests extends ESTestCase { ClusterService clusterService = mock(ClusterService.class); when(clusterService.localNode()).thenReturn(localNode); when(clusterService.state()).thenReturn(clusterState.build()); - transportService = mock(TransportService.class); + transportService = new TransportService(Settings.EMPTY, null, null, interceptor); return new IngestProxyActionFilter(clusterService, transportService); } @@ -97,7 +98,7 @@ public class IngestProxyActionFilterTests extends ESTestCase { ActionListener actionListener = mock(ActionListener.class); ActionFilterChain actionFilterChain = mock(ActionFilterChain.class); int totalNodes = randomIntBetween(1, 5); - IngestProxyActionFilter filter = buildFilter(0, totalNodes); + IngestProxyActionFilter filter = buildFilter(0, totalNodes, TransportService.NOOP_TRANSPORT_INTERCEPTOR); String action; ActionRequest request; @@ -114,7 +115,6 @@ public class IngestProxyActionFilterTests extends ESTestCase { } catch(IllegalStateException e) { assertThat(e.getMessage(), equalTo("There are no ingest nodes in this cluster, unable to forward request to an ingest node.")); } - verifyZeroInteractions(transportService); verifyZeroInteractions(actionFilterChain); verifyZeroInteractions(actionListener); } @@ -124,7 +124,8 @@ public class IngestProxyActionFilterTests extends ESTestCase { ActionListener actionListener = mock(ActionListener.class); ActionFilterChain actionFilterChain = mock(ActionFilterChain.class); int totalNodes = randomIntBetween(1, 5); - IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes); + IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); String action; ActionRequest request; @@ -136,7 +137,6 @@ public class IngestProxyActionFilterTests extends ESTestCase { request = new BulkRequest().add(new IndexRequest()); } filter.apply(task, action, request, 
actionListener, actionFilterChain); - verifyZeroInteractions(transportService); verify(actionFilterChain).proceed(any(Task.class), eq(action), same(request), same(actionListener)); verifyZeroInteractions(actionListener); } @@ -147,11 +147,11 @@ public class IngestProxyActionFilterTests extends ESTestCase { ActionFilterChain actionFilterChain = mock(ActionFilterChain.class); ActionRequest request = mock(ActionRequest.class); int totalNodes = randomIntBetween(1, 5); - IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes); + IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); String action = randomAsciiOfLengthBetween(1, 20); filter.apply(task, action, request, actionListener, actionFilterChain); - verifyZeroInteractions(transportService); verify(actionFilterChain).proceed(any(Task.class), eq(action), same(request), same(actionListener)); verifyZeroInteractions(actionListener); } @@ -162,19 +162,31 @@ public class IngestProxyActionFilterTests extends ESTestCase { ActionListener actionListener = mock(ActionListener.class); ActionFilterChain actionFilterChain = mock(ActionFilterChain.class); int totalNodes = randomIntBetween(2, 5); - IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes); - Answer answer = invocationOnMock -> { - TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3]; - transportResponseHandler.handleResponse(new IndexResponse()); - return null; - }; - doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class)); + AtomicBoolean run = new AtomicBoolean(false); + + IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes, + new TransportInterceptor() { + @Override + public AsyncSender 
interceptSender(AsyncSender sender) { + return new AsyncSender() { + @Override + public void sendRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler) { + assertTrue(run.compareAndSet(false, true)); + assertTrue(node.isIngestNode()); + assertEquals(action, IndexAction.NAME); + handler.handleResponse((T) new IndexResponse()); + } + }; + } + }); IndexRequest indexRequest = new IndexRequest().setPipeline("_id"); filter.apply(task, IndexAction.NAME, indexRequest, actionListener, actionFilterChain); - verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(IndexAction.NAME), same(indexRequest), any(TransportResponseHandler.class)); verifyZeroInteractions(actionFilterChain); + assertTrue(run.get()); verify(actionListener).onResponse(any(IndexResponse.class)); verify(actionListener, never()).onFailure(any(TransportException.class)); } @@ -185,13 +197,24 @@ public class IngestProxyActionFilterTests extends ESTestCase { ActionListener actionListener = mock(ActionListener.class); ActionFilterChain actionFilterChain = mock(ActionFilterChain.class); int totalNodes = randomIntBetween(2, 5); - IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes); - Answer answer = invocationOnMock -> { - TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3]; - transportResponseHandler.handleResponse(new BulkResponse(null, -1)); - return null; - }; - doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class)); + AtomicBoolean run = new AtomicBoolean(false); + IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes, + new TransportInterceptor() { + @Override + public AsyncSender interceptSender(AsyncSender sender) { + return new AsyncSender() { + @Override + 
public void sendRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler) { + assertTrue(run.compareAndSet(false, true)); + assertTrue(node.isIngestNode()); + assertEquals(action, BulkAction.NAME); + handler.handleResponse((T) new BulkResponse(null, -1)); + } + }; + } + }); BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(new IndexRequest().setPipeline("_id")); @@ -200,11 +223,10 @@ public class IngestProxyActionFilterTests extends ESTestCase { bulkRequest.add(new IndexRequest()); } filter.apply(task, BulkAction.NAME, bulkRequest, actionListener, actionFilterChain); - - verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(BulkAction.NAME), same(bulkRequest), any(TransportResponseHandler.class)); verifyZeroInteractions(actionFilterChain); verify(actionListener).onResponse(any(BulkResponse.class)); verify(actionListener, never()).onFailure(any(TransportException.class)); + assertTrue(run.get()); } @SuppressWarnings("unchecked") @@ -213,30 +235,39 @@ public class IngestProxyActionFilterTests extends ESTestCase { ActionListener actionListener = mock(ActionListener.class); ActionFilterChain actionFilterChain = mock(ActionFilterChain.class); int totalNodes = randomIntBetween(2, 5); - IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes); - Answer answer = invocationOnMock -> { - TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3]; - transportResponseHandler.handleException(new TransportException(new IllegalArgumentException())); - return null; - }; - doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class)); - - String action; + String requestAction; ActionRequest request; if (randomBoolean()) { - action = IndexAction.NAME; + requestAction = 
IndexAction.NAME; request = new IndexRequest().setPipeline("_id"); } else { - action = BulkAction.NAME; + requestAction = BulkAction.NAME; request = new BulkRequest().add(new IndexRequest().setPipeline("_id")); } - - filter.apply(task, action, request, actionListener, actionFilterChain); - - verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(action), same(request), any(TransportResponseHandler.class)); + AtomicBoolean run = new AtomicBoolean(false); + IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes, + new TransportInterceptor() { + @Override + public AsyncSender interceptSender(AsyncSender sender) { + return new AsyncSender() { + @Override + public void sendRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler) { + assertTrue(run.compareAndSet(false, true)); + assertTrue(node.isIngestNode()); + assertEquals(action, requestAction); + handler.handleException(new TransportException(new IllegalArgumentException())); + } + }; + } + }); + filter.apply(task, requestAction, request, actionListener, actionFilterChain); verifyZeroInteractions(actionFilterChain); verify(actionListener).onFailure(any(TransportException.class)); verify(actionListener, never()).onResponse(any(TransportResponse.class)); + assertTrue(run.get()); + } private static class IngestNodeMatcher extends CustomTypeSafeMatcher { diff --git a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java index dd3da801067..05dcb9d1f1d 100644 --- a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java @@ -120,7 +120,8 @@ public class MainActionTests extends ESTestCase { ClusterState state = ClusterState.builder(clusterName).blocks(blocks).build(); 
when(clusterService.state()).thenReturn(state); - TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), mock(TransportService.class), + TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), new TransportService(Settings.EMPTY, + null ,null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService); AtomicReference responseRef = new AtomicReference<>(); action.doExecute(new MainRequest(), new ActionListener() { diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index 011fb172514..9df5bc82238 100644 --- a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -57,8 +57,12 @@ public class TransportMultiSearchActionTests extends ESTestCase { when(actionFilters.filters()).thenReturn(new ActionFilter[0]); ThreadPool threadPool = new ThreadPool(settings); TaskManager taskManager = mock(TaskManager.class); - TransportService transportService = mock(TransportService.class); - when(transportService.getTaskManager()).thenReturn(taskManager); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR) { + @Override + public TaskManager getTaskManager() { + return taskManager; + } + }; ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build()); IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY); diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java 
b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 7b237383034..a249a0e98ef 100644 --- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -191,7 +191,8 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL); + final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); setClusterState(clusterService, TEST_INDEX); diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 9aeafcac0e4..a7db99cc201 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -87,7 +87,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool); + transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); localNode = new DiscoveryNode("local_node", LocalTransportAddress.buildUnique(), 
Collections.emptyMap(), diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 744a116f4a7..67cc64cb871 100644 --- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -177,7 +177,8 @@ public class TransportNodesActionTests extends ESTestCase { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL); + transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); int numNodes = randomIntBetween(3, 10); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index b5b2cbeb737..2d098a065b5 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.support.replication; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.ShardOperationFailedException; @@ -92,7 +91,7 @@ public class BroadcastReplicationTests extends ESTestCase { super.setUp(); LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(Collections.emptyList()), circuitBreakerService); clusterService = 
createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool); + transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), null); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index f821f82c33a..b9bda2ec650 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -149,7 +149,8 @@ public class TransportReplicationActionTests extends ESTestCase { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool); + transportService = new TransportService(clusterService.getSettings(), transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 80e689743fd..a554ca53d99 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -130,7 +130,8 @@ public class TransportWriteActionTests extends ESTestCase { private class TestAction extends TransportWriteAction { protected TestAction() { - super(Settings.EMPTY, "test", mock(TransportService.class), null, null, null, null, new ActionFilters(new HashSet<>()), + super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), + null, null, null, null, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME); } diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 37abc4d5eed..1d736060568 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -142,7 +142,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL); + transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); action = new TestTransportInstanceSingleOperationAction( diff --git a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java index 276a43581a6..a82f964c013 100644 --- 
a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java +++ b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java @@ -73,8 +73,9 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase { protected ThreadPool threadPool; private Client client; - @Before - public void initClient() { + @Override + public void setUp() throws Exception { + super.setUp(); Settings settings = Settings.builder() .put(HEADER_SETTINGS) .put("path.home", createTempDir().toString()) @@ -85,8 +86,10 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase { client = buildClient(settings, ACTIONS); } - @After - public void cleanupClient() throws Exception { + + @Override + public void tearDown() throws Exception { + super.tearDown(); client.close(); terminate(threadPool); } diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index e736e4b86a1..310a08cc2c9 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -31,48 +31,56 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; 
-import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -/** - * - */ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { - private static final LocalTransportAddress address = new LocalTransportAddress("test"); + private MockTransportService transportService; + + @Override + public void tearDown() throws Exception { + super.tearDown(); + transportService.stop(); + transportService.close(); + } @Override protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) { + transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool); + transportService.start(); + transportService.acceptIncomingRequests(); TransportClient client = new MockTransportClient(Settings.builder() .put("client.transport.sniff", false) .put("cluster.name", "cluster1") .put("node.name", "transport_client_" + this.getTestName()) .put(headersSettings) - .build(), InternalTransportService.TestPlugin.class); - - client.addTransportAddress(address); + .build(), InternalTransportServiceInterceptor.TestPlugin.class); + InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class) + 
.filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get(); + plugin.instance.threadPool = client.threadPool(); + plugin.instance.address = transportService.boundAddress().publishAddress(); + client.addTransportAddress(transportService.boundAddress().publishAddress()); return client; } @@ -85,72 +93,77 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { .put("client.transport.nodes_sampler_interval", "1s") .put(HEADER_SETTINGS) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), - InternalTransportService.TestPlugin.class)) { - client.addTransportAddress(address); + InternalTransportServiceInterceptor.TestPlugin.class)) { + InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class) + .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get(); + plugin.instance.threadPool = client.threadPool(); + plugin.instance.address = transportService.boundAddress().publishAddress(); + client.addTransportAddress(transportService.boundAddress().publishAddress()); - InternalTransportService service = (InternalTransportService) client.injector.getInstance(TransportService.class); - - if (!service.clusterStateLatch.await(5, TimeUnit.SECONDS)) { + if (!plugin.instance.clusterStateLatch.await(5, TimeUnit.SECONDS)) { fail("takes way too long to get the cluster state"); } assertThat(client.connectedNodes().size(), is(1)); - assertThat(client.connectedNodes().get(0).getAddress(), is((TransportAddress) address)); + assertThat(client.connectedNodes().get(0).getAddress(), is(transportService.boundAddress().publishAddress())); } } - public static class InternalTransportService extends TransportService { + public static class InternalTransportServiceInterceptor implements TransportInterceptor { + + ThreadPool threadPool; + TransportAddress address; + public static class TestPlugin extends Plugin { + private 
InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor(); + public void onModule(NetworkModule transportModule) { - transportModule.registerTransportService("internal", InternalTransportService.class); - } - @Override - public Settings additionalSettings() { - return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); + transportModule.addTransportInterceptor(new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler(String action, + TransportRequestHandler actualHandler) { + return instance.interceptHandler(action, actualHandler); + } + + @Override + public AsyncSender interceptSender(AsyncSender sender) { + return instance.interceptSender(sender); + } + }); } } - CountDownLatch clusterStateLatch = new CountDownLatch(1); - - @Inject - public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool) { - super(settings, transport, threadPool); - } - - @Override @SuppressWarnings("unchecked") - public void sendRequest(DiscoveryNode node, String action, TransportRequest request, - TransportRequestOptions options, TransportResponseHandler handler) { - if (TransportLivenessAction.NAME.equals(action)) { - assertHeaders(threadPool); - ((TransportResponseHandler) handler).handleResponse(new LivenessResponse(clusterName, node)); - return; - } - if (ClusterStateAction.NAME.equals(action)) { - assertHeaders(threadPool); - ClusterName cluster1 = new ClusterName("cluster1"); - ClusterState.Builder builder = ClusterState.builder(cluster1); - //the sniffer detects only data nodes - builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(), - Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); - ((TransportResponseHandler) handler) - .handleResponse(new ClusterStateResponse(cluster1, builder.build())); - clusterStateLatch.countDown(); - return; - } - - handler.handleException(new 
TransportException("", new InternalException(action))); - } + final CountDownLatch clusterStateLatch = new CountDownLatch(1); @Override - public boolean nodeConnected(DiscoveryNode node) { - assertThat(node.getAddress(), equalTo(address)); - return true; - } + public AsyncSender interceptSender(AsyncSender sender) { + return new AsyncSender() { + @Override + public void sendRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (TransportLivenessAction.NAME.equals(action)) { + assertHeaders(threadPool); + ((TransportResponseHandler) handler).handleResponse( + new LivenessResponse(new ClusterName("cluster1"), node)); + return; + } + if (ClusterStateAction.NAME.equals(action)) { + assertHeaders(threadPool); + ClusterName cluster1 = new ClusterName("cluster1"); + ClusterState.Builder builder = ClusterState.builder(cluster1); + //the sniffer detects only data nodes + builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(), + Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); + ((TransportResponseHandler) handler) + .handleResponse(new ClusterStateResponse(cluster1, builder.build())); + clusterStateLatch.countDown(); + return; + } - @Override - public void connectToNode(DiscoveryNode node) throws ConnectTransportException { - assertThat(node.getAddress(), equalTo(address)); + handler.handleException(new TransportException("", new InternalException(action))); + } + }; } } } diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 41891c5831f..1596519651f 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -29,12 +29,12 @@ 
import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.Closeable; @@ -76,28 +76,22 @@ public class TransportClientNodesServiceTests extends ESTestCase { return new TestResponse(); } }; - transportService = new TransportService(settings, transport, threadPool) { + transportService = new TransportService(settings, transport, threadPool, new TransportInterceptor() { @Override - public void sendRequest(DiscoveryNode node, String action, - TransportRequest request, final TransportResponseHandler handler) { - if (TransportLivenessAction.NAME.equals(action)) { - super.sendRequest(node, action, request, wrapLivenessResponseHandler(handler, node, clusterName)); - } else { - super.sendRequest(node, action, request, handler); - } + public AsyncSender interceptSender(AsyncSender sender) { + return new AsyncSender() { + @Override + public void sendRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (TransportLivenessAction.NAME.equals(action)) { + sender.sendRequest(node, action, request, options, wrapLivenessResponseHandler(handler, node, clusterName)); + } else { + sender.sendRequest(node, action, request, options, handler); + } + } + }; } - - @Override - public void sendRequest(DiscoveryNode node, String action, TransportRequest request, - TransportRequestOptions options, 
- TransportResponseHandler handler) { - if (TransportLivenessAction.NAME.equals(action)) { - super.sendRequest(node, action, request, options, wrapLivenessResponseHandler(handler, node, clusterName)); - } else { - super.sendRequest(node, action, request, options, handler); - } - } - }; + }); transportService.start(); transportService.acceptIncomingRequests(); transportClientNodesService = diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index ed7a20dc87e..5bf2bc38c3e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -149,7 +149,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); this.transport = new MockTransport(); - transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL); + transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index e0a04dc2f3c..e042fadca95 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -106,7 +106,7 @@ public class ShardStateActionTests extends ESTestCase { super.setUp(); this.transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL); + transportService = new TransportService(clusterService.getSettings(), transport, 
THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null); diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index df5ebb0193a..93a0c7f9e5c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -92,7 +92,8 @@ public class ClusterStateHealthTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool); + transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); } diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 734068347b3..1590868ff85 100644 --- a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -35,18 +35,12 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.cat.AbstractCatAction; import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportInterceptor; import java.util.Collections; public class NetworkModuleTests extends ModuleTestCase { - static class FakeTransportService extends 
TransportService { - public FakeTransportService() { - super(null, null, null); - } - } - static class FakeTransport extends AssertingLocalTransport { public FakeTransport() { super(null, null, null, null); @@ -101,23 +95,6 @@ public class NetworkModuleTests extends ModuleTestCase { } } - public void testRegisterTransportService() { - Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom") - .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local") - .build(); - NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false); - module.registerTransportService("custom", FakeTransportService.class); - assertBinding(module, TransportService.class, FakeTransportService.class); - assertFalse(module.isTransportClient()); - - // check it works with transport only as well - module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true); - module.registerTransportService("custom", FakeTransportService.class); - assertBinding(module, TransportService.class, FakeTransportService.class); - assertTrue(module.isTransportClient()); - } - public void testRegisterTransport() { Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom") .put(NetworkModule.HTTP_ENABLED.getKey(), false) @@ -161,4 +138,27 @@ public class NetworkModuleTests extends ModuleTestCase { assertNotBound(module, HttpServerTransport.class); assertFalse(module.isTransportClient()); } + + public void testRegisterInterceptor() { + Settings settings = Settings.builder() + .put(NetworkModule.HTTP_ENABLED.getKey(), false) + .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + + NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false); + TransportInterceptor interceptor = new TransportInterceptor() {}; + module.addTransportInterceptor(interceptor); + 
assertInstanceBinding(module, TransportInterceptor.class, i -> { + if (i instanceof NetworkModule.CompositeTransportInterceptor) { + assertEquals(((NetworkModule.CompositeTransportInterceptor)i).transportInterceptors.size(), 1); + return ((NetworkModule.CompositeTransportInterceptor)i).transportInterceptors.get(0) == interceptor; + } + return false; + }); + + NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> { + module.addTransportInterceptor(null); + }); + assertEquals("interceptor must not be null", nullPointerException.getMessage()); + + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index b1b7749d88c..d51447c9298 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -147,7 +147,7 @@ public class ZenFaultDetectionTests extends ESTestCase { return version; } }, - threadPool); + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); return transportService; diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java index e04b0b52d81..bdffb5f99d6 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java @@ -210,7 +210,8 @@ public class UnicastZenPingTests extends ESTestCase { Version version) { MockTcpTransport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, version); - final TransportService transportService = 
new TransportService(settings, transport, threadPool); + final TransportService transportService = new TransportService(settings, transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); ConcurrentMap counters = ConcurrentCollections.newConcurrentMap(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java index 111f4b470d4..d634d8cd4fe 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java @@ -77,7 +77,8 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { clusterService = createClusterService(THREAD_POOL); transport = new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService()); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL); + transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); indicesService = getInstanceFromNode(IndicesService.class); shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL); actionFilters = new ActionFilters(Collections.emptySet()); diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index a5e4b2ab7e1..7adddd6ee0b 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -150,7 +150,8 @@ public class ClusterStateChanges extends AbstractComponent { } // services - TransportService transportService = 
new TransportService(settings, transport, threadPool); + TransportService transportService = new TransportService(settings, transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, null, null) { // metaData upgrader should do nothing @Override diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index e9a1476449f..a4cdabb0b3b 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -336,7 +336,8 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice final Executor executor = mock(Executor.class); when(threadPool.generic()).thenReturn(executor); final MockIndicesService indicesService = indicesServiceSupplier.get(); - final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool); + final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService, transportService, null); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index 7dea17f3830..62b5bc30a68 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -88,7 +88,7 @@ public class IndicesStoreTests extends ESTestCase { public void before() 
{ localNode = new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), emptySet(), Version.CURRENT); clusterService = createClusterService(threadPool); - indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null), null); + indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), null); } @After diff --git a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java deleted file mode 100644 index e5c734cbfb6..00000000000 --- a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.test.transport.AssertingLocalTransport; -import org.elasticsearch.threadpool.ThreadPool; - -/** Unit tests for module registering custom transport and transport service */ -public class TransportModuleTests extends ModuleTestCase { - - - - static class FakeTransport extends AssertingLocalTransport { - @Inject - public FakeTransport(Settings settings, CircuitBreakerService circuitBreakerService, ThreadPool threadPool, - NamedWriteableRegistry namedWriteableRegistry) { - super(settings, circuitBreakerService, threadPool, namedWriteableRegistry); - } - } - - static class FakeTransportService extends TransportService { - @Inject - public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) { - super(settings, transport, threadPool); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index cfc4de6b3c0..531c06f5bec 100644 --- a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -65,7 +65,8 @@ public class TransportServiceHandshakeTests extends ESTestCase { new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(settings, Collections.emptyList())); - TransportService transportService = new MockTransportService(settings, transport, threadPool); + TransportService transportService = new 
MockTransportService(settings, transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); transportService.start(); transportService.acceptIncomingRequests(); DiscoveryNode node = diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java index a7d9805cf3c..ba72ade58e7 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java @@ -67,7 +67,8 @@ public class Netty3SizeHeaderFrameDecoderTests extends ESTestCase { nettyTransport = new Netty3Transport(settings, threadPool, networkService, bigArrays, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService()); nettyTransport.start(); - TransportService transportService = new TransportService(settings, nettyTransport, threadPool); + TransportService transportService = new TransportService(settings, nettyTransport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); nettyTransport.transportServiceAdapter(transportService.createAdapter()); TransportAddress[] boundAddresses = nettyTransport.boundAddress().boundAddresses(); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java index bb169aa8d70..7c44fc4d4ea 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import 
org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseOptions; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; @@ -65,13 +66,13 @@ public class Netty3ScheduledPingTests extends ESTestCase { NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); final Netty3Transport nettyA = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool); + MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); serviceA.start(); serviceA.acceptIncomingRequests(); final Netty3Transport nettyB = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool); + MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); serviceB.start(); serviceB.acceptIncomingRequests(); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java index 5b862908ea8..b90b788f904 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import 
org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import java.net.InetAddress; @@ -55,7 +56,7 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase return version; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool); + return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); } @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index 03f7e5fdab7..0b8d5fb6a35 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportResponseOptions; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; @@ -65,13 +66,13 @@ public class Netty4ScheduledPingTests extends ESTestCase { NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool); + MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, 
TransportService.NOOP_TRANSPORT_INTERCEPTOR); serviceA.start(); serviceA.acceptIncomingRequests(); final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool); + MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); serviceB.start(); serviceB.acceptIncomingRequests(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 8902d6c1095..3a3a4587cac 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import java.net.InetAddress; @@ -55,7 +56,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase return version; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool); + return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); } @Override diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java 
index abd91c8c07f..f38ae218ec0 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.MockTcpTransport; +import org.elasticsearch.transport.TransportService; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -73,7 +74,7 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(Settings.EMPTY, Collections.emptyList())); - transportService = new MockTransportService(Settings.EMPTY, transport, threadPool); + transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); } public void testBuildDynamicNodes() throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index a1be5769b61..bdcdeef227c 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -20,6 +20,8 @@ package org.elasticsearch.node; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -31,7 +33,9 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import 
org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; import java.util.Collection; @@ -75,5 +79,22 @@ public class MockNode extends Node { } return new MockSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); } + + @Override + protected NetworkModule createNetworkModule(Settings settings, NetworkService networkService) { + // we use the MockTransportService.TestPlugin class as a marker to create a newtwork + // module with this MockNetworkService. NetworkService is such an integral part of the systme + // we don't allow to plug it in from plugins or anything. this is a test-only override and + // can't be done in a production env. + if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).size() == 1) { + return new NetworkModule(networkService, settings, false) { + @Override + protected void bindTransportService() { + bind(TransportService.class).to(MockTransportService.class).asEagerSingleton(); + } + }; + } + return super.createNetworkModule(settings, networkService); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java index 8725ed815ad..2e8001bf0f4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java @@ -109,7 +109,6 @@ final class ExternalNode implements Closeable { case "path.home": case NetworkModule.TRANSPORT_TYPE_KEY: case "discovery.type": - case NetworkModule.TRANSPORT_SERVICE_TYPE_KEY: case "config.ignore_system_properties": continue; default: diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java 
b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 90ac8ed78ca..f9b7e1d3a89 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -21,6 +21,7 @@ package org.elasticsearch.test.transport; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -29,7 +30,6 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -72,21 +72,13 @@ import java.util.concurrent.CopyOnWriteArrayList; * (for example, @see org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing, which constructs * fake DiscoveryNode instances where the publish address is one of the bound addresses). 
*/ -public class MockTransportService extends TransportService { +public final class MockTransportService extends TransportService { public static class TestPlugin extends Plugin { - public void onModule(NetworkModule module) { - module.registerTransportService("mock", MockTransportService.class); - } - @Override public List> getSettings() { return Arrays.asList(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING); } - @Override - public Settings additionalSettings() { - return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "mock").build(); - } } public static MockTransportService local(Settings settings, Version version, ThreadPool threadPool) { @@ -97,14 +89,14 @@ public class MockTransportService extends TransportService { return version; } }; - return new MockTransportService(settings, transport, threadPool); + return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); } private final Transport original; @Inject - public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool) { - super(settings, new LookupTestTransport(transport), threadPool); + public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor) { + super(settings, new LookupTestTransport(transport), threadPool, interceptor); this.original = transport; } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java index 2b0f551dbb7..a198ef77956 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java @@ -37,5 +37,4 @@ public class MockTransportClient extends TransportClient { public MockTransportClient(Settings settings, Collection> plugins) { super(settings, DEFAULT_SETTINGS, plugins); } - } diff --git 
a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index 4ee7b0c7b27..e6a563b3e89 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -34,7 +34,8 @@ public class MockTcpTransportTests extends AbstractSimpleTransportTestCase { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version); - MockTransportService mockTransportService = new MockTransportService(Settings.EMPTY, transport, threadPool); + MockTransportService mockTransportService = new MockTransportService(Settings.EMPTY, transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR); mockTransportService.start(); return mockTransportService; } From 3aabda6752ced45137bb6d3ad1bd1720f348cbbb Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 16 Sep 2016 16:11:53 +0200 Subject: [PATCH 13/25] Ensure elasticsearch doesn't start with unuspported indices (#20514) If an index was created with pre 2.0 we should not treat it as supported even if all segments have been upgraded to a supported lucene version. 
Closes #20512 --- .../metadata/MetaDataIndexUpgradeService.java | 11 +------- .../MetaDataIndexUpgradeServiceTests.java | 26 +++++++++++++++++-- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index d1141aeb9f4..fa55043f61a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -100,16 +100,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * Returns true if this index can be supported by the current version of elasticsearch */ private static boolean isSupportedVersion(IndexMetaData indexMetaData) { - if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) { - // The index was created with elasticsearch that was using Lucene 5.2.1 - return true; - } - if (indexMetaData.getMinimumCompatibleVersion() != null && - indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) { - //The index was upgraded we can work with it - return true; - } - return false; + return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1); } /** diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index 52c52242c0f..376feb305a1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -56,7 +56,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase { } public void testUpgrade() { - MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, 
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); + MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), + Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); assertFalse(service.isUpgraded(src)); src = service.upgradeIndexMetaData(src); @@ -67,7 +68,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase { } public void testIsUpgraded() { - MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); + MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), + Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); assertFalse(service.isUpgraded(src)); Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion()); @@ -77,6 +79,26 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase { assertTrue(service.isUpgraded(src)); } + public void testFailUpgrade() { + MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), + Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); + final IndexMetaData metaData = newIndexMeta("foo", Settings.builder() + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("1.7.0")) + .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, + Version.CURRENT.luceneVersion.toString()).build()); + 
String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(metaData)).getMessage(); + assertEquals(message, "The index [[foo/BOOM]] was created before v2.0.0.beta1. It should be reindexed in Elasticsearch 2.x " + + "before upgrading to " + Version.CURRENT.toString() + "."); + + IndexMetaData goodMeta = newIndexMeta("foo", Settings.builder() + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.1.0")) + .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, + Version.CURRENT.luceneVersion.toString()).build()); + service.upgradeIndexMetaData(goodMeta); + } + public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) From 01a6b7c4085efcaa314c01b6d22c5b59771d776c Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Fri, 16 Sep 2016 09:08:55 -0500 Subject: [PATCH 14/25] [TEST] Refactor Geo test names to follow naming and style convention This commit closes a stale issue where GeoJson parsing tests used a combination of underscore and camel case. 
closes #8998 --- .../common/geo/GeoJSONShapeParserTests.java | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java index 76376a4d30d..21112b97873 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java @@ -58,7 +58,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { private static final GeometryFactory GEOMETRY_FACTORY = SPATIAL_CONTEXT.getGeometryFactory(); - public void testParse_simplePoint() throws IOException { + public void testParseSimplePoint() throws IOException { String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point") .startArray("coordinates").value(100.0).value(0.0).endArray() .endObject().string(); @@ -67,7 +67,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson); } - public void testParse_lineString() throws IOException { + public void testParseLineString() throws IOException { String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString") .startArray("coordinates") .startArray().value(100.0).value(0.0).endArray() @@ -84,7 +84,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(jtsGeom(expected), lineGeoJson); } - public void testParse_multiLineString() throws IOException { + public void testParseMultiLineString() throws IOException { String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiLineString") .startArray("coordinates") .startArray() @@ -111,7 +111,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(jtsGeom(expected), multilinesGeoJson); } - public void testParse_circle() throws 
IOException { + public void testParseCircle() throws IOException { String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "circle") .startArray("coordinates").value(100.0).value(0.0).endArray() .field("radius", "100m") @@ -121,7 +121,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(expected, multilinesGeoJson); } - public void testParse_multiDimensionShapes() throws IOException { + public void testParseMultiDimensionShapes() throws IOException { // multi dimension point String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point") .startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray() @@ -147,7 +147,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson); } - public void testParse_envelope() throws IOException { + public void testParseEnvelope() throws IOException { // test #1: envelope with expected coordinate order (TopLeft, BottomRight) String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope") .startArray("coordinates") @@ -192,7 +192,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } - public void testParse_polygonNoHoles() throws IOException { + public void testParsePolygonNoHoles() throws IOException { String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") .startArray() @@ -217,7 +217,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(jtsGeom(expected), polygonGeoJson); } - public void testParse_invalidPoint() throws IOException { + public void testParseInvalidPoint() throws IOException { // test case 1: create an invalid point object with multipoint data format String invalidPoint1 = 
XContentFactory.jsonBuilder().startObject().field("type", "point") .startArray("coordinates") @@ -238,7 +238,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } - public void testParse_invalidMultipoint() throws IOException { + public void testParseInvalidMultipoint() throws IOException { // test case 1: create an invalid multipoint object with single coordinate String invalidMultipoint1 = XContentFactory.jsonBuilder().startObject().field("type", "multipoint") .startArray("coordinates").value(-74.011).value(40.753).endArray() @@ -267,7 +267,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } - public void testParse_invalidMultiPolygon() throws IOException { + public void testParseInvalidMultiPolygon() throws IOException { // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon") .startArray("coordinates") @@ -302,7 +302,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class); } - public void testParse_OGCPolygonWithoutHoles() throws IOException { + public void testParseOGCPolygonWithoutHoles() throws IOException { // test 1: ccw poly not crossing dateline String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") @@ -384,7 +384,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertMultiPolygon(shape); } - public void testParse_OGCPolygonWithHoles() throws IOException { + public void testParseOGCPolygonWithHoles() throws IOException { // test 1: ccw poly not crossing dateline String polygonGeoJson = 
XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") @@ -490,7 +490,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertMultiPolygon(shape); } - public void testParse_invalidPolygon() throws IOException { + public void testParseInvalidPolygon() throws IOException { /** * The following 3 test cases ensure proper error handling of invalid polygons * per the GeoJSON specification @@ -579,7 +579,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } - public void testParse_polygonWithHole() throws IOException { + public void testParsePolygonWithHole() throws IOException { String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") .startArray() @@ -623,7 +623,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(jtsGeom(expected), polygonGeoJson); } - public void testParse_selfCrossingPolygon() throws IOException { + public void testParseSelfCrossingPolygon() throws IOException { // test self crossing ccw poly not crossing dateline String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .startArray("coordinates") @@ -644,7 +644,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class); } - public void testParse_multiPoint() throws IOException { + public void testParseMultiPoint() throws IOException { String multiPointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPoint") .startArray("coordinates") .startArray().value(100.0).value(0.0).endArray() @@ -658,7 +658,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(expected, multiPointGeoJson); } - public void testParse_multiPolygon() throws IOException { + public 
void testParseMultiPolygon() throws IOException { // test #1: two polygons; one without hole, one with hole String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon") .startArray("coordinates") @@ -770,7 +770,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson); } - public void testParse_geometryCollection() throws IOException { + public void testParseGeometryCollection() throws IOException { String geometryCollectionGeoJson = XContentFactory.jsonBuilder().startObject() .field("type", "GeometryCollection") .startArray("geometries") @@ -822,7 +822,7 @@ public class GeoJSONShapeParserTests extends ESTestCase { assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson); } - public void testParse_orientationOption() throws IOException { + public void testParseOrientationOption() throws IOException { // test 1: valid ccw (right handed system) poly not crossing dateline (with 'right' field) String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon") .field("orientation", "right") From d0f4bc16caf9d9da3319b42bc35e9382f37704c9 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 16 Sep 2016 16:15:46 +0200 Subject: [PATCH 15/25] Fix FieldStats deserialization of `ip` field (#20522) * Fix FieldStats deserialization of `ip` field Add missing readBytes in `ip` field deserialization Add (de)serialization tests for all types This change also removes the ability to set FieldStats.minValue or FieldStats.maxValue to null. This is not required anymore since the stats are built on fields with values only. 
Fixes #20516 --- .../action/fieldstats/FieldStats.java | 172 ++++++++---------- .../fieldstats/FieldStatsTests.java | 54 ++++++ 2 files changed, 125 insertions(+), 101 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java index 4a4f106b085..1b2f1dc5ed5 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.net.InetAddress; +import java.util.Objects; public abstract class FieldStats implements Writeable, ToXContent { private final byte type; @@ -46,13 +47,11 @@ public abstract class FieldStats implements Writeable, ToXContent { protected T minValue; protected T maxValue; - FieldStats(byte type, long maxDoc, boolean isSearchable, boolean isAggregatable) { - this(type, maxDoc, 0, 0, 0, isSearchable, isAggregatable, null, null); - } - FieldStats(byte type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) { + Objects.requireNonNull(minValue, "minValue must not be null"); + Objects.requireNonNull(maxValue, "maxValue must not be null"); this.type = type; this.maxDoc = maxDoc; this.docCount = docCount; @@ -220,14 +219,10 @@ public abstract class FieldStats implements Writeable, ToXContent { } private void updateMinMax(T min, T max) { - if (minValue == null) { - minValue = min; - } else if (min != null && compare(minValue, min) > 0) { + if (compare(minValue, min) > 0) { minValue = min; } - if (maxValue == null) { - maxValue = max; - } else if (max != null && compare(maxValue, max) < 0) { + if (compare(maxValue, max) < 0) { maxValue = max; } } @@ -266,11 +261,7 @@ public abstract class FieldStats implements Writeable, ToXContent { 
out.writeLong(sumTotalTermFreq); out.writeBoolean(isSearchable); out.writeBoolean(isAggregatable); - boolean hasMinMax = minValue != null; - out.writeBoolean(hasMinMax); - if (hasMinMax) { - writeMinMax(out); - } + writeMinMax(out); } protected abstract void writeMinMax(StreamOutput out) throws IOException; @@ -280,9 +271,6 @@ public abstract class FieldStats implements Writeable, ToXContent { * otherwise false is returned */ public boolean match(IndexConstraint constraint) { - if (minValue == null) { - return false; - } int cmp; T value = valueOf(constraint.getValue(), constraint.getOptionalFormat()); if (constraint.getProperty() == IndexConstraint.Property.MIN) { @@ -307,6 +295,31 @@ public abstract class FieldStats implements Writeable, ToXContent { } } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldStats that = (FieldStats) o; + + if (type != that.type) return false; + if (maxDoc != that.maxDoc) return false; + if (docCount != that.docCount) return false; + if (sumDocFreq != that.sumDocFreq) return false; + if (sumTotalTermFreq != that.sumTotalTermFreq) return false; + if (isSearchable != that.isSearchable) return false; + if (isAggregatable != that.isAggregatable) return false; + if (!minValue.equals(that.minValue)) return false; + return maxValue.equals(that.maxValue); + + } + + @Override + public int hashCode() { + return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, + minValue, maxValue); + } + public static class Long extends FieldStats { public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, boolean isSearchable, boolean isAggregatable, @@ -315,17 +328,6 @@ public abstract class FieldStats implements Writeable, ToXContent { isSearchable, isAggregatable, minValue, maxValue); } - public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, - boolean isSearchable, 
boolean isAggregatable) { - super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, null, null); - } - - public Long(long maxDoc, - boolean isSearchable, boolean isAggregatable) { - super((byte) 0, maxDoc, isSearchable, isAggregatable); - } - @Override public int compare(java.lang.Long o1, java.lang.Long o2) { return o1.compareTo(o2); @@ -344,12 +346,12 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override public String getMinValueAsString() { - return minValue != null ? java.lang.Long.toString(minValue) : null; + return java.lang.Long.toString(minValue); } @Override public String getMaxValueAsString() { - return maxValue != null ? java.lang.Long.toString(maxValue) : null; + return java.lang.Long.toString(maxValue); } } @@ -361,15 +363,6 @@ public abstract class FieldStats implements Writeable, ToXContent { minValue, maxValue); } - public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, - boolean isSearchable, boolean isAggregatable) { - super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, null, null); - } - - public Double(long maxDoc, boolean isSearchable, boolean isAggregatable) { - super((byte) 1, maxDoc, isSearchable, isAggregatable); - } - @Override public int compare(java.lang.Double o1, java.lang.Double o2) { return o1.compareTo(o2); @@ -391,12 +384,12 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override public String getMinValueAsString() { - return minValue != null ? java.lang.Double.toString(minValue) : null; + return java.lang.Double.toString(minValue); } @Override public String getMaxValueAsString() { - return maxValue != null ? 
java.lang.Double.toString(maxValue) : null; + return java.lang.Double.toString(maxValue); } } @@ -412,20 +405,6 @@ public abstract class FieldStats implements Writeable, ToXContent { this.formatter = formatter; } - public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, - boolean isSearchable, boolean isAggregatable, - FormatDateTimeFormatter formatter) { - super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, - null, null); - this.formatter = formatter; - } - - public Date(long maxDoc, boolean isSearchable, boolean isAggregatable, - FormatDateTimeFormatter formatter) { - super((byte) 2, maxDoc, isSearchable, isAggregatable); - this.formatter = formatter; - } - @Override public int compare(java.lang.Long o1, java.lang.Long o2) { return o1.compareTo(o2); @@ -449,12 +428,29 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override public String getMinValueAsString() { - return minValue != null ? formatter.printer().print(minValue) : null; + return formatter.printer().print(minValue); } @Override public String getMaxValueAsString() { - return maxValue != null ? 
formatter.printer().print(maxValue) : null; + return formatter.printer().print(maxValue); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + + Date that = (Date) o; + return Objects.equals(formatter.format(), that.formatter.format()); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + formatter.format().hashCode(); + return result; } } @@ -467,10 +463,6 @@ public abstract class FieldStats implements Writeable, ToXContent { minValue, maxValue); } - public Text(long maxDoc, boolean isSearchable, boolean isAggregatable) { - super((byte) 3, maxDoc, isSearchable, isAggregatable); - } - @Override public int compare(BytesRef o1, BytesRef o2) { return o1.compareTo(o2); @@ -492,12 +484,12 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override public String getMinValueAsString() { - return minValue != null ? minValue.utf8ToString() : null; + return minValue.utf8ToString(); } @Override public String getMaxValueAsString() { - return maxValue != null ? maxValue.utf8ToString() : null; + return maxValue.utf8ToString(); } @Override @@ -516,10 +508,6 @@ public abstract class FieldStats implements Writeable, ToXContent { minValue, maxValue); } - public Ip(long maxDoc, boolean isSearchable, boolean isAggregatable) { - super((byte) 4, maxDoc, isSearchable, isAggregatable); - } - @Override public int compare(InetAddress o1, InetAddress o2) { byte[] b1 = InetAddressPoint.encode(o1); @@ -544,12 +532,12 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override public String getMinValueAsString() { - return minValue != null ? NetworkAddress.format(minValue) : null; + return NetworkAddress.format(minValue); } @Override public String getMaxValueAsString() { - return maxValue != null ? 
NetworkAddress.format(maxValue) : null; + return NetworkAddress.format(maxValue); } } @@ -561,53 +549,35 @@ public abstract class FieldStats implements Writeable, ToXContent { long sumTotalTermFreq = in.readLong(); boolean isSearchable = in.readBoolean(); boolean isAggregatable = in.readBoolean(); - boolean hasMinMax = in.readBoolean(); switch (type) { case 0: - if (hasMinMax) { - return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, in.readLong(), in.readLong()); - } return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable); + isSearchable, isAggregatable, in.readLong(), in.readLong()); case 1: - if (hasMinMax) { - return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, in.readDouble(), in.readDouble()); - } return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable); + isSearchable, isAggregatable, in.readDouble(), in.readDouble()); case 2: FormatDateTimeFormatter formatter = Joda.forPattern(in.readString()); - if (hasMinMax) { - return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, formatter, in.readLong(), in.readLong()); - } return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, formatter); + isSearchable, isAggregatable, formatter, in.readLong(), in.readLong()); + case 3: - if (hasMinMax) { - return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef()); - } return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, - isSearchable, isAggregatable, null, null); + isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef()); case 4: - InetAddress min = null; - InetAddress max = null; - if (hasMinMax) { - int l1 = in.readByte(); - byte[] b1 = new byte[l1]; - int l2 = in.readByte(); - byte[] b2 = new byte[l2]; - min = 
InetAddressPoint.decode(b1); - max = InetAddressPoint.decode(b2); - } + int l1 = in.readByte(); + byte[] b1 = new byte[l1]; + in.readBytes(b1, 0, l1); + int l2 = in.readByte(); + byte[] b2 = new byte[l2]; + in.readBytes(b2, 0, l2); + InetAddress min = InetAddressPoint.decode(b1); + InetAddress max = InetAddressPoint.decode(b2); return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, min, max); diff --git a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java index 4a5f79a12a8..8cd1b479416 100644 --- a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java +++ b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java @@ -23,13 +23,19 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStatsResponse; import org.elasticsearch.action.fieldstats.IndexConstraint; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Date; import java.util.List; import java.util.Locale; @@ -513,4 +519,52 @@ public class FieldStatsTests extends ESSingleNodeTestCase { assertThat(response.getAllFieldStats().get("_type").isSearchable(), equalTo(true)); assertThat(response.getAllFieldStats().get("_type").isAggregatable(), equalTo(true)); } + + public void testSerialization() throws IOException { + for (int i = 0; i < 20; i++) { + assertSerialization(randomFieldStats()); + } + } + + /** + * creates a random field stats which does not guarantee 
that {@link FieldStats#maxValue} is greater than {@link FieldStats#minValue} + **/ + private FieldStats randomFieldStats() throws UnknownHostException { + int type = randomInt(5); + switch (type) { + case 0: + return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), + randomPositiveLong(), randomBoolean(), randomBoolean(), randomLong(), randomLong()); + case 1: + return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), + randomPositiveLong(), randomBoolean(), randomBoolean(), randomDouble(), randomDouble()); + case 2: + return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), + randomPositiveLong(), randomBoolean(), randomBoolean(), Joda.forPattern("basicDate"), + new Date().getTime(), new Date().getTime()); + case 3: + return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), + randomPositiveLong(), randomBoolean(), randomBoolean(), + new BytesRef(randomAsciiOfLength(10)), new BytesRef(randomAsciiOfLength(20))); + case 4: + return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), + randomPositiveLong(), randomBoolean(), randomBoolean(), + InetAddress.getByName("::1"), InetAddress.getByName("::1")); + case 5: + return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), + randomPositiveLong(), randomBoolean(), randomBoolean(), + InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.4")); + default: + throw new IllegalArgumentException("Invalid type"); + } + } + + private void assertSerialization(FieldStats stats) throws IOException { + BytesStreamOutput output = new BytesStreamOutput(); + stats.writeTo(output); + output.flush(); + FieldStats deserializedStats = FieldStats.readFrom(output.bytes().streamInput()); + assertThat(stats, equalTo(deserializedStats)); + assertThat(stats.hashCode(), equalTo(deserializedStats.hashCode())); + } } From 
8ec94a4ba03c37e897687128a0d7ff7f0697ee8f Mon Sep 17 00:00:00 2001 From: Alexander Guz Date: Fri, 16 Sep 2016 18:15:41 +0200 Subject: [PATCH 16/25] Edited response structure on indexing a document (#20517) Added "_shards" and "result" keys to the response. --- docs/reference/getting-started.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index fa257742bbe..be563bdafd3 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -302,6 +302,12 @@ curl -XPUT 'localhost:9200/customer/external/1?pretty' -d ' "_type" : "external", "_id" : "1", "_version" : 1, + "result" : "created", + "_shards" : { + "total" : 2, + "successful" : 1, + "failed" : 0 + }, "created" : true } -------------------------------------------------- From 697adfb3c44048690eeb13e5bf231f8e97dc0126 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 16 Sep 2016 10:26:54 -0400 Subject: [PATCH 17/25] Fix up tasks integ test I'd made some mistakes that hadn't caused the test to fail but did slow it down and partially invalidate some of the assertions. This fixes those mistakes. 
--- .../admin/cluster/node/tasks/TasksIT.java | 49 +++++++++++++------ 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index b2798b00fca..134477cc204 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -72,11 +72,11 @@ import java.util.Map; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; +import static java.util.Collections.emptyList; import static java.util.Collections.singleton; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -333,12 +333,11 @@ public class TasksIT extends ESIntegTestCase { * particular status results from indexing. For that, look at {@link TransportReplicationActionTests}. We intentionally don't use the * task recording mechanism used in other places in this test so we can make sure that the status fetching works properly over the wire. */ - public void testCanFetchIndexStatus() throws InterruptedException, ExecutionException, IOException { - /* We make sure all indexing tasks wait to start before this lock is *unlocked* so we can fetch their status with both the get and - * list APIs. */ + public void testCanFetchIndexStatus() throws Exception { + // First latch waits for the task to start, second on blocks it from finishing. 
CountDownLatch taskRegistered = new CountDownLatch(1); CountDownLatch letTaskFinish = new CountDownLatch(1); - ListenableActionFuture indexFuture = null; + Thread index = null; try { for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() { @@ -348,7 +347,7 @@ public class TasksIT extends ESIntegTestCase { taskRegistered.countDown(); logger.debug("Blocking [{}] starting", task); try { - letTaskFinish.await(10, TimeUnit.SECONDS); + assertTrue(letTaskFinish.await(10, TimeUnit.SECONDS)); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -364,8 +363,13 @@ public class TasksIT extends ESIntegTestCase { } }); } - indexFuture = client().prepareIndex("test", "test").setSource("test", "test").execute(); - taskRegistered.await(10, TimeUnit.SECONDS); // waiting for at least one task to be registered + // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener + index = new Thread(() -> { + IndexResponse indexResponse = client().prepareIndex("test", "test").setSource("test", "test").get(); + assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures()); + }); + index.start(); + assertTrue(taskRegistered.await(10, TimeUnit.SECONDS)); // waiting for at least one task to be registered ListTasksResponse listResponse = client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*") .setDetailed(true).get(); @@ -387,10 +391,13 @@ public class TasksIT extends ESIntegTestCase { } } finally { letTaskFinish.countDown(); - if (indexFuture != null) { - IndexResponse indexResponse = indexFuture.get(); - assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures()); + if (index != null) { + index.join(); } + assertBusy(() -> { + assertEquals(emptyList(), + 
client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").get().getTasks()); + }); } } @@ -439,6 +446,9 @@ public class TasksIT extends ESIntegTestCase { }, response -> { assertThat(response.getNodeFailures(), empty()); assertThat(response.getTaskFailures(), empty()); + assertThat(response.getTasks(), hasSize(1)); + TaskInfo task = response.getTasks().get(0); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction()); }); } @@ -446,10 +456,12 @@ public class TasksIT extends ESIntegTestCase { waitForCompletionTestCase(false, id -> { return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute(); }, response -> { - assertNotNull(response.getTask().getTask()); assertTrue(response.getTask().isCompleted()); // We didn't store the result so it won't come back when we wait assertNull(response.getTask().getResponse()); + // But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete. 
+ assertNotNull(response.getTask().getTask()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction()); }); } @@ -457,10 +469,12 @@ public class TasksIT extends ESIntegTestCase { waitForCompletionTestCase(true, id -> { return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute(); }, response -> { - assertNotNull(response.getTask().getTask()); assertTrue(response.getTask().isCompleted()); // We stored the task so we should get its results assertEquals(0, response.getTask().getResponseAsMap().get("failure_count")); + // The task's details should also be there + assertNotNull(response.getTask().getTask()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction()); }); } @@ -490,6 +504,7 @@ public class TasksIT extends ESIntegTestCase { ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() { @Override public void waitForTaskCompletion(Task task) { + waitForWaitingToStart.countDown(); } @Override @@ -498,7 +513,6 @@ public class TasksIT extends ESIntegTestCase { @Override public void onTaskUnregistered(Task task) { - waitForWaitingToStart.countDown(); } }); } @@ -506,7 +520,9 @@ public class TasksIT extends ESIntegTestCase { // Spin up a request to wait for the test task to finish waitResponseFuture = wait.apply(taskId); - // Wait for the wait to start + /* Wait for the wait to start. This should count down just *before* we wait for completion but after the list/get has got a + * reference to the running task. Because we unblock immediately after this the task may no longer be running for us to wait + * on which is fine. 
*/ waitForWaitingToStart.await(); } finally { // Unblock the request so the wait for completion request can finish @@ -517,7 +533,8 @@ public class TasksIT extends ESIntegTestCase { T waitResponse = waitResponseFuture.get(); validator.accept(waitResponse); - future.get(); + TestTaskPlugin.NodesResponse response = future.get(); + assertEquals(emptyList(), response.failures()); } public void testListTasksWaitForTimeout() throws Exception { From 629e2b2aff99f9d534edd8a17c30f12716a1725f Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 16 Sep 2016 21:02:12 +0200 Subject: [PATCH 18/25] Throw error if query element doesn't end with END_OBJECT (#20528) * Throw error if query element doesn't end with END_OBJECT Followup to #20515 where we added validation that after we parse a query within a query element, we should not get a field name. Truth is that the only token allowed at that point is END_OBJECT, as our DSL allows only one single query within the query object: ``` { "query" : { "term" : { "field" : "value" } } } ``` We can then check that after parsing of the query we have an end_object that closes the query itself (which we already do). Following that we can check that the query object is immediately closed, as there are no other tokens that can be present in that position. 
Relates to #20515 --- .../index/query/BoolQueryBuilder.java | 4 --- .../index/query/QueryParseContext.java | 22 ++++++--------- .../index/query/BoolQueryBuilderTests.java | 18 ++++++++++--- .../FunctionScoreQueryBuilderTests.java | 27 ++++++++++++++++--- .../builder/SearchSourceBuilderTests.java | 13 +++++---- 5 files changed, 51 insertions(+), 33 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index 7b375f125c9..8e877823431 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -338,10 +338,6 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { default: throw new ParsingException(parser.getTokenLocation(), "[bool] query does not support [" + currentFieldName + "]"); } - if (parser.currentToken() != XContentParser.Token.END_OBJECT) { - throw new ParsingException(parser.getTokenLocation(), - "expected [END_OBJECT] but got [{}], possibly too many query clauses", parser.currentToken()); - } } else if (token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { switch (currentFieldName) { diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 7b5fa97825f..4fcecdf9f2a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -25,11 +25,9 @@ import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; 
import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptSettings; import java.io.IOException; import java.util.Objects; @@ -95,16 +93,12 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { * Parses a query excluding the query element that wraps it */ public Optional parseInnerQueryBuilder() throws IOException { - // move to START object - XContentParser.Token token; if (parser.currentToken() != XContentParser.Token.START_OBJECT) { - token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, must start with start_object"); } } - token = parser.nextToken(); - if (token == XContentParser.Token.END_OBJECT) { + if (parser.nextToken() == XContentParser.Token.END_OBJECT) { // we encountered '{}' for a query clause String msg = "query malformed, empty clause found at [" + parser.getTokenLocation() +"]"; DEPRECATION_LOGGER.deprecated(msg); @@ -113,26 +107,26 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { } return Optional.empty(); } - if (token != XContentParser.Token.FIELD_NAME) { + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, no field after start_object"); } String queryName = parser.currentName(); // move to the next START_OBJECT - token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] query malformed, no start_object after query name"); } @SuppressWarnings("unchecked") Optional result = (Optional) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher, parser.getTokenLocation()).fromXContent(this); + //end_object of 
the specific query (e.g. match, multi_match etc.) element if (parser.currentToken() != XContentParser.Token.END_OBJECT) { throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]"); } - parser.nextToken(); - if (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + //end_object of the query object + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { throw new ParsingException(parser.getTokenLocation(), - "[" + queryName + "] malformed query, unexpected [FIELD_NAME] found [" + parser.currentName() + "]"); + "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]"); } return result; } diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 5f9c7e0881d..5e63c3868a7 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -365,12 +365,22 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase parseQuery(query, ParseFieldMatcher.EMPTY)); - assertEquals("[match] malformed query, unexpected [FIELD_NAME] found [match]", ex.getMessage()); + assertEquals("[match] malformed query, expected [END_OBJECT] but found [FIELD_NAME]", ex.getMessage()); } public void testRewrite() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index c280bc4a49e..0eab330e367 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -700,7 +700,7 @@ public 
class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase messageMatcher) { + public void testMalformedQueryMultipleQueryElements() throws IOException { + String json = "{\n" + + " \"function_score\":{\n" + + " \"query\":{\n" + + " \"bool\":{\n" + + " \"must\":{\"match\":{\"field\":\"value\"}}" + + " }\n" + + " },\n" + + " \"query\":{\n" + + " \"bool\":{\n" + + " \"must\":{\"match\":{\"field\":\"value\"}}" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + expectParsingException(json, "[query] is already defined."); + } + + private static void expectParsingException(String json, Matcher messageMatcher) { ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(e.getMessage(), messageMatcher); } - private void expectParsingException(String json, String message) { + private static void expectParsingException(String json, String message) { expectParsingException(json, equalTo("failed to parse [function_score] query. " + message)); } diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index fe324190106..6d0ebffa5bd 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -423,8 +423,9 @@ public class SearchSourceBuilderTests extends ESTestCase { } } - public void testInvalid() throws Exception { - String restContent = " { \"query\": {\n" + + public void testMultipleQueryObjectsAreRejected() throws Exception { + String restContent = + " { \"query\": {\n" + " \"multi_match\": {\n" + " \"query\": \"workd\",\n" + " \"fields\": [\"title^5\", \"plain_body\"]\n" + @@ -436,11 +437,9 @@ public class SearchSourceBuilderTests extends ESTestCase { " }\n" + " } }"; try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { - 
SearchSourceBuilder.fromXContent(createParseContext(parser), - searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers); - fail("invalid query syntax multiple keys under query"); - } catch (ParsingException e) { - assertThat(e.getMessage(), containsString("filters")); + ParsingException e = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(createParseContext(parser), + searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers)); + assertEquals("[multi_match] malformed query, expected [END_OBJECT] but found [FIELD_NAME]", e.getMessage()); } } From 4c726311e2c5ef825b37d8d049bbedcad6224216 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 15 Sep 2016 20:27:08 +0200 Subject: [PATCH 19/25] [TEST] introduce test plugin to inject random search ext elements in search request tests A few of our unit tests generate a random search request body and run tests against it. The source can optionally contain ext elements under the ext sections, which can be parsed by plugins. With this commit we introduce a plugin so that the tests don't use the one from FetchSubPhasePluginIT anymore. They rather generate multiple search ext elements. The plugin can parse and deal with all those. This extends the test coverage as we may have multiple elements with random names. Took the chance to introduce a common test base class for search requests, called AbstractSearchTestCase, given that the setup phase is the same for all three tests around search source. Then we can have the setup isolated to the base class and the subclasses relying on it. 
Closes #17685 --- .../search/AbstractSearchTestCase.java | 512 ++++++++++++++++++ .../search/SearchRequestTests.java | 70 +-- .../builder/SearchSourceBuilderTests.java | 279 +--------- .../ShardSearchTransportRequestTests.java | 44 +- 4 files changed, 522 insertions(+), 383 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java diff --git a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java new file mode 100644 index 00000000000..048416c25ef --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java @@ -0,0 +1,512 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests; +import org.elasticsearch.search.rescore.QueryRescoreBuilderTests; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.slice.SliceBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.suggest.SuggestBuilderTests; +import org.elasticsearch.test.AbstractQueryTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; 
+import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; + +public abstract class AbstractSearchTestCase extends ESTestCase { + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + private TestSearchExtPlugin searchExtPlugin; + + public void setUp() throws Exception { + super.setUp(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + searchExtPlugin = new TestSearchExtPlugin(); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.singletonList(searchExtPlugin)); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + protected SearchSourceBuilder createSearchSourceBuilder() throws IOException { + SearchSourceBuilder builder = new SearchSourceBuilder(); + if (randomBoolean()) { + builder.from(randomIntBetween(0, 10000)); + } + if (randomBoolean()) { + builder.size(randomIntBetween(0, 10000)); + } + if (randomBoolean()) { + builder.explain(randomBoolean()); + } + if (randomBoolean()) { + builder.version(randomBoolean()); + } + if (randomBoolean()) { + builder.trackScores(randomBoolean()); + } + if (randomBoolean()) { + builder.minScore(randomFloat() * 1000); + } + if (randomBoolean()) { + builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout")); + } + if (randomBoolean()) { + builder.terminateAfter(randomIntBetween(1, 100000)); + } + + switch(randomInt(2)) { + case 0: + builder.storedFields(); + break; + case 1: + builder.storedField("_none_"); + break; + case 2: + int fieldsSize = randomInt(25); + List fields = new ArrayList<>(fieldsSize); + for (int i = 0; i < fieldsSize; i++) { + fields.add(randomAsciiOfLengthBetween(5, 50)); + } + 
builder.storedFields(fields); + break; + default: + throw new IllegalStateException(); + } + + if (randomBoolean()) { + int scriptFieldsSize = randomInt(25); + for (int i = 0; i < scriptFieldsSize; i++) { + if (randomBoolean()) { + builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean()); + } else { + builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo")); + } + } + } + if (randomBoolean()) { + FetchSourceContext fetchSourceContext; + int branch = randomInt(5); + String[] includes = new String[randomIntBetween(0, 20)]; + for (int i = 0; i < includes.length; i++) { + includes[i] = randomAsciiOfLengthBetween(5, 20); + } + String[] excludes = new String[randomIntBetween(0, 20)]; + for (int i = 0; i < excludes.length; i++) { + excludes[i] = randomAsciiOfLengthBetween(5, 20); + } + switch (branch) { + case 0: + fetchSourceContext = new FetchSourceContext(randomBoolean()); + break; + case 1: + fetchSourceContext = new FetchSourceContext(includes, excludes); + break; + case 2: + fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)); + break; + case 3: + fetchSourceContext = new FetchSourceContext(true, includes, excludes); + break; + case 4: + fetchSourceContext = new FetchSourceContext(includes); + break; + case 5: + fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20)); + break; + default: + throw new IllegalStateException(); + } + builder.fetchSource(fetchSourceContext); + } + if (randomBoolean()) { + int size = randomIntBetween(0, 20); + List statsGroups = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + statsGroups.add(randomAsciiOfLengthBetween(5, 20)); + } + builder.stats(statsGroups); + } + if (randomBoolean()) { + int indexBoostSize = randomIntBetween(1, 10); + for (int i = 0; i < indexBoostSize; i++) { + builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10); + } + } + if (randomBoolean()) { + 
builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); + } + if (randomBoolean()) { + builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); + } + if (randomBoolean()) { + int numSorts = randomIntBetween(1, 5); + for (int i = 0; i < numSorts; i++) { + int branch = randomInt(5); + switch (branch) { + case 0: + builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); + break; + case 1: + builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20), + AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values()))); + break; + case 2: + builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); + break; + case 3: + builder.sort(SortBuilders.scriptSort(new Script("foo"), + ScriptSortBuilder.ScriptSortType.NUMBER).order(randomFrom(SortOrder.values()))); + break; + case 4: + builder.sort(randomAsciiOfLengthBetween(5, 20)); + break; + case 5: + builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values())); + break; + } + } + } + + if (randomBoolean()) { + int numSearchFrom = randomIntBetween(1, 5); + // We build a json version of the search_from first in order to + // ensure that every number type remain the same before/after xcontent (de)serialization. + // This is not a problem because the final type of each field value is extracted from associated sort field. + // This little trick ensure that equals and hashcode are the same when using the xcontent serialization. 
+ XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); + jsonBuilder.startObject(); + jsonBuilder.startArray("search_from"); + for (int i = 0; i < numSearchFrom; i++) { + int branch = randomInt(8); + switch (branch) { + case 0: + jsonBuilder.value(randomInt()); + break; + case 1: + jsonBuilder.value(randomFloat()); + break; + case 2: + jsonBuilder.value(randomLong()); + break; + case 3: + jsonBuilder.value(randomDouble()); + break; + case 4: + jsonBuilder.value(randomAsciiOfLengthBetween(5, 20)); + break; + case 5: + jsonBuilder.value(randomBoolean()); + break; + case 6: + jsonBuilder.value(randomByte()); + break; + case 7: + jsonBuilder.value(randomShort()); + break; + case 8: + jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20))); + break; + } + } + jsonBuilder.endArray(); + jsonBuilder.endObject(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes()); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues()); + } + if (randomBoolean()) { + builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder()); + } + if (randomBoolean()) { + builder.suggest(SuggestBuilderTests.randomSuggestBuilder()); + } + if (randomBoolean()) { + int numRescores = randomIntBetween(1, 5); + for (int i = 0; i < numRescores; i++) { + builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder()); + } + } + if (randomBoolean()) { + builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20))); + } + if (randomBoolean()) { + Set elementNames = new HashSet<>(searchExtPlugin.getSupportedElements().keySet()); + int numSearchExts = randomIntBetween(1, elementNames.size()); + while(elementNames.size() > numSearchExts) { + elementNames.remove(randomFrom(elementNames)); + } + List searchExtBuilders = new ArrayList<>(); + for (String elementName : elementNames) { + 
searchExtBuilders.add(searchExtPlugin.getSupportedElements().get(elementName).apply(randomAsciiOfLengthBetween(3, 10))); + } + builder.ext(searchExtBuilders); + } + if (randomBoolean()) { + String field = randomBoolean() ? null : randomAsciiOfLengthBetween(5, 20); + int max = between(2, 1000); + int id = randomInt(max-1); + if (field == null) { + builder.slice(new SliceBuilder(id, max)); + } else { + builder.slice(new SliceBuilder(field, id, max)); + } + } + return builder; + } + + protected SearchRequest createSearchRequest() throws IOException { + SearchRequest searchRequest = new SearchRequest(); + if (randomBoolean()) { + searchRequest.indices(generateRandomStringArray(10, 10, false, false)); + } + if (randomBoolean()) { + searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } + if (randomBoolean()) { + searchRequest.types(generateRandomStringArray(10, 10, false, false)); + } + if (randomBoolean()) { + searchRequest.preference(randomAsciiOfLengthBetween(3, 10)); + } + if (randomBoolean()) { + searchRequest.requestCache(randomBoolean()); + } + if (randomBoolean()) { + searchRequest.routing(randomAsciiOfLengthBetween(3, 10)); + } + if (randomBoolean()) { + searchRequest.scroll(randomPositiveTimeValue()); + } + if (randomBoolean()) { + searchRequest.searchType(randomFrom(SearchType.values())); + } + if (randomBoolean()) { + searchRequest.source(createSearchSourceBuilder()); + } + return searchRequest; + } + + private static class TestSearchExtPlugin extends Plugin implements SearchPlugin { + private final List> searchExtSpecs; + private final Map> supportedElements; + + private TestSearchExtPlugin() { + int numSearchExts = randomIntBetween(1, 3); + this.searchExtSpecs = new ArrayList<>(numSearchExts); + this.supportedElements = new HashMap<>(); + for (int i = 0; i < numSearchExts; i++) { + switch (randomIntBetween(0, 2)) { + case 0: + if 
(this.supportedElements.put(TestSearchExtBuilder1.NAME, TestSearchExtBuilder1::new) == null) { + this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder1.NAME, TestSearchExtBuilder1::new, + new TestSearchExtParser<>(TestSearchExtBuilder1::new))); + } + break; + case 1: + if (this.supportedElements.put(TestSearchExtBuilder2.NAME, TestSearchExtBuilder2::new) == null) { + this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder2.NAME, TestSearchExtBuilder2::new, + new TestSearchExtParser<>(TestSearchExtBuilder2::new))); + } + break; + case 2: + if (this.supportedElements.put(TestSearchExtBuilder3.NAME, TestSearchExtBuilder3::new) == null) { + this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder3.NAME, TestSearchExtBuilder3::new, + new TestSearchExtParser<>(TestSearchExtBuilder3::new))); + } + break; + default: + throw new UnsupportedOperationException(); + } + } + } + + Map> getSupportedElements() { + return supportedElements; + } + + @Override + public List> getSearchExts() { + return searchExtSpecs; + } + } + + private static class TestSearchExtParser implements SearchExtParser { + private final Function searchExtBuilderFunction; + + TestSearchExtParser(Function searchExtBuilderFunction) { + this.searchExtBuilderFunction = searchExtBuilderFunction; + } + + @Override + public T fromXContent(XContentParser parser) throws IOException { + return searchExtBuilderFunction.apply(parseField(parser)); + } + + String parseField(XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "start_object expected, found " + parser.currentToken()); + } + if (parser.nextToken() != XContentParser.Token.FIELD_NAME) { + throw new ParsingException(parser.getTokenLocation(), "field_name expected, found " + parser.currentToken()); + } + String field = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new 
ParsingException(parser.getTokenLocation(), "start_object expected, found " + parser.currentToken()); + } + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "end_object expected, found " + parser.currentToken()); + } + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "end_object expected, found " + parser.currentToken()); + } + return field; + } + } + + //Would be nice to have a single builder that gets its name as a parameter, but the name wouldn't get a value when the object + //is created reading from the stream (constructor that takes a StreamInput) which is a problem as we check that after reading + //a named writeable its name is the expected one. That's why we go for the following less dynamic approach. + private static class TestSearchExtBuilder1 extends TestSearchExtBuilder { + private static final String NAME = "name1"; + + TestSearchExtBuilder1(String field) { + super(NAME, field); + } + + TestSearchExtBuilder1(StreamInput in) throws IOException { + super(NAME, in); + } + } + + private static class TestSearchExtBuilder2 extends TestSearchExtBuilder { + private static final String NAME = "name2"; + + TestSearchExtBuilder2(String field) { + super(NAME, field); + } + + TestSearchExtBuilder2(StreamInput in) throws IOException { + super(NAME, in); + } + } + + private static class TestSearchExtBuilder3 extends TestSearchExtBuilder { + private static final String NAME = "name3"; + + TestSearchExtBuilder3(String field) { + super(NAME, field); + } + + TestSearchExtBuilder3(StreamInput in) throws IOException { + super(NAME, in); + } + } + + private abstract static class TestSearchExtBuilder extends SearchExtBuilder { + final String objectName; + protected final String name; + + TestSearchExtBuilder(String name, String objectName) { + this.name = name; + this.objectName = objectName; + } + + TestSearchExtBuilder(String name, 
StreamInput in) throws IOException { + this.name = name; + this.objectName = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(objectName); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TestSearchExtBuilder that = (TestSearchExtBuilder) o; + return Objects.equals(objectName, that.objectName) && + Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(objectName, name); + } + + @Override + public String getWriteableName() { + return name; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(name); + builder.startObject(objectName); + builder.endObject(); + builder.endObject(); + return builder; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java index 2c7ae356bf8..6f48dbe4911 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java @@ -24,49 +24,13 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.search.fetch.FetchSubPhasePluginIT; -import org.elasticsearch.test.ESTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; import java.io.IOException; -import 
java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import static java.util.Collections.emptyList; -import static org.elasticsearch.search.builder.SearchSourceBuilderTests.createSearchSourceBuilder; - -public class SearchRequestTests extends ESTestCase { - - private static NamedWriteableRegistry namedWriteableRegistry; - - @BeforeClass - public static void beforeClass() { - IndicesModule indicesModule = new IndicesModule(emptyList()) { - @Override - protected void configure() { - bindMapperExtension(); - } - }; - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, - Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())); - List entries = new ArrayList<>(); - entries.addAll(indicesModule.getNamedWriteables()); - entries.addAll(searchModule.getNamedWriteables()); - namedWriteableRegistry = new NamedWriteableRegistry(entries); - } - - @AfterClass - public static void afterClass() { - namedWriteableRegistry = null; - } +public class SearchRequestTests extends AbstractSearchTestCase { public void testSerialization() throws Exception { SearchRequest searchRequest = createSearchRequest(); @@ -204,38 +168,6 @@ public class SearchRequestTests extends ESTestCase { } } - public static SearchRequest createSearchRequest() throws IOException { - SearchRequest searchRequest = new SearchRequest(); - if (randomBoolean()) { - searchRequest.indices(generateRandomStringArray(10, 10, false, false)); - } - if (randomBoolean()) { - searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - } - if (randomBoolean()) { - searchRequest.types(generateRandomStringArray(10, 10, false, false)); - } - if (randomBoolean()) { - searchRequest.preference(randomAsciiOfLengthBetween(3, 10)); - } - if (randomBoolean()) { - searchRequest.requestCache(randomBoolean()); - } - if (randomBoolean()) { - 
searchRequest.routing(randomAsciiOfLengthBetween(3, 10)); - } - if (randomBoolean()) { - searchRequest.scroll(randomPositiveTimeValue()); - } - if (randomBoolean()) { - searchRequest.searchType(randomFrom(SearchType.values())); - } - if (randomBoolean()) { - searchRequest.source(createSearchSourceBuilder()); - } - return searchRequest; - } - private static SearchRequest copyRequest(SearchRequest searchRequest) throws IOException { SearchRequest result = new SearchRequest(); result.indices(searchRequest.indices()); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 6d0ebffa5bd..43ea81e993c 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -26,297 +26,28 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.script.Script; -import 
org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.fetch.FetchSubPhasePluginIT; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests; -import org.elasticsearch.search.rescore.QueryRescoreBuilderTests; +import org.elasticsearch.search.AbstractSearchTestCase; import org.elasticsearch.search.rescore.QueryRescorerBuilder; -import org.elasticsearch.search.searchafter.SearchAfterBuilder; -import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; -import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; -import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.search.suggest.SuggestBuilderTests; -import org.elasticsearch.test.AbstractQueryTestCase; -import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; -public class SearchSourceBuilderTests extends ESTestCase { - - private NamedWriteableRegistry namedWriteableRegistry; - - private SearchRequestParsers searchRequestParsers; - - private ParseFieldMatcher parseFieldMatcher; - - public void setUp() throws Exception { - super.setUp(); - // we have to prefer CURRENT since with the range of versions we support - // it's rather unlikely to get the current actually. 
- Settings settings = Settings.builder() - .put("node.name", AbstractQueryTestCase.class.toString()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); - IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); - SearchModule searchModule = new SearchModule(settings, false, - Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())); - List entries = new ArrayList<>(); - entries.addAll(indicesModule.getNamedWriteables()); - entries.addAll(searchModule.getNamedWriteables()); - namedWriteableRegistry = new NamedWriteableRegistry(entries); - searchRequestParsers = searchModule.getSearchRequestParsers(); - parseFieldMatcher = ParseFieldMatcher.STRICT; - } - - public static SearchSourceBuilder createSearchSourceBuilder() throws IOException { - SearchSourceBuilder builder = new SearchSourceBuilder(); - if (randomBoolean()) { - builder.from(randomIntBetween(0, 10000)); - } - if (randomBoolean()) { - builder.size(randomIntBetween(0, 10000)); - } - if (randomBoolean()) { - builder.explain(randomBoolean()); - } - if (randomBoolean()) { - builder.version(randomBoolean()); - } - if (randomBoolean()) { - builder.trackScores(randomBoolean()); - } - if (randomBoolean()) { - builder.minScore(randomFloat() * 1000); - } - if (randomBoolean()) { - builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout")); - } - if (randomBoolean()) { - builder.terminateAfter(randomIntBetween(1, 100000)); - } - // if (randomBoolean()) { - // builder.defaultRescoreWindowSize(randomIntBetween(1, 100)); - // } - - switch(randomInt(2)) { - case 0: - builder.storedFields(); - break; - case 1: - builder.storedField("_none_"); - break; - case 2: - int fieldsSize = randomInt(25); - List fields = new ArrayList<>(fieldsSize); - for (int i = 0; i < fieldsSize; i++) { - fields.add(randomAsciiOfLengthBetween(5, 50)); - } - builder.storedFields(fields); - break; - default: - throw new IllegalStateException(); - } - - if 
(randomBoolean()) { - int scriptFieldsSize = randomInt(25); - for (int i = 0; i < scriptFieldsSize; i++) { - if (randomBoolean()) { - builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean()); - } else { - builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo")); - } - } - } - if (randomBoolean()) { - FetchSourceContext fetchSourceContext; - int branch = randomInt(5); - String[] includes = new String[randomIntBetween(0, 20)]; - for (int i = 0; i < includes.length; i++) { - includes[i] = randomAsciiOfLengthBetween(5, 20); - } - String[] excludes = new String[randomIntBetween(0, 20)]; - for (int i = 0; i < excludes.length; i++) { - excludes[i] = randomAsciiOfLengthBetween(5, 20); - } - switch (branch) { - case 0: - fetchSourceContext = new FetchSourceContext(randomBoolean()); - break; - case 1: - fetchSourceContext = new FetchSourceContext(includes, excludes); - break; - case 2: - fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)); - break; - case 3: - fetchSourceContext = new FetchSourceContext(true, includes, excludes); - break; - case 4: - fetchSourceContext = new FetchSourceContext(includes); - break; - case 5: - fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20)); - break; - default: - throw new IllegalStateException(); - } - builder.fetchSource(fetchSourceContext); - } - if (randomBoolean()) { - int size = randomIntBetween(0, 20); - List statsGroups = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - statsGroups.add(randomAsciiOfLengthBetween(5, 20)); - } - builder.stats(statsGroups); - } - if (randomBoolean()) { - int indexBoostSize = randomIntBetween(1, 10); - for (int i = 0; i < indexBoostSize; i++) { - builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10); - } - } - if (randomBoolean()) { - builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), 
randomAsciiOfLengthBetween(5, 20))); - } - if (randomBoolean()) { - builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); - } - if (randomBoolean()) { - int numSorts = randomIntBetween(1, 5); - for (int i = 0; i < numSorts; i++) { - int branch = randomInt(5); - switch (branch) { - case 0: - builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); - break; - case 1: - builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20), - AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values()))); - break; - case 2: - builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - break; - case 3: - builder.sort(SortBuilders.scriptSort(new Script("foo"), - ScriptSortType.NUMBER).order(randomFrom(SortOrder.values()))); - break; - case 4: - builder.sort(randomAsciiOfLengthBetween(5, 20)); - break; - case 5: - builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values())); - break; - } - } - } - - if (randomBoolean()) { - int numSearchFrom = randomIntBetween(1, 5); - // We build a json version of the search_from first in order to - // ensure that every number type remain the same before/after xcontent (de)serialization. - // This is not a problem because the final type of each field value is extracted from associated sort field. - // This little trick ensure that equals and hashcode are the same when using the xcontent serialization. 
- XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); - jsonBuilder.startObject(); - jsonBuilder.startArray("search_from"); - for (int i = 0; i < numSearchFrom; i++) { - int branch = randomInt(8); - switch (branch) { - case 0: - jsonBuilder.value(randomInt()); - break; - case 1: - jsonBuilder.value(randomFloat()); - break; - case 2: - jsonBuilder.value(randomLong()); - break; - case 3: - jsonBuilder.value(randomDouble()); - break; - case 4: - jsonBuilder.value(randomAsciiOfLengthBetween(5, 20)); - break; - case 5: - jsonBuilder.value(randomBoolean()); - break; - case 6: - jsonBuilder.value(randomByte()); - break; - case 7: - jsonBuilder.value(randomShort()); - break; - case 8: - jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20))); - break; - } - } - jsonBuilder.endArray(); - jsonBuilder.endObject(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes()); - parser.nextToken(); - parser.nextToken(); - parser.nextToken(); - builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues()); - } - if (randomBoolean()) { - builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder()); - } - if (randomBoolean()) { - builder.suggest(SuggestBuilderTests.randomSuggestBuilder()); - } - if (randomBoolean()) { - int numRescores = randomIntBetween(1, 5); - for (int i = 0; i < numRescores; i++) { - builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder()); - } - } - if (randomBoolean()) { - builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20))); - } - if (randomBoolean()) { - builder.ext(Collections.singletonList(new FetchSubPhasePluginIT.TermVectorsFetchBuilder("test"))); - } - if (randomBoolean()) { - String field = randomBoolean() ? 
null : randomAsciiOfLengthBetween(5, 20); - int max = between(2, 1000); - int id = randomInt(max-1); - if (field == null) { - builder.slice(new SliceBuilder(id, max)); - } else { - builder.slice(new SliceBuilder(field, id, max)); - } - } - return builder; - } +public class SearchSourceBuilderTests extends AbstractSearchTestCase { public void testFromXContent() throws IOException { SearchSourceBuilder testSearchSourceBuilder = createSearchSourceBuilder(); @@ -348,7 +79,7 @@ public class SearchSourceBuilderTests extends ESTestCase { } private QueryParseContext createParseContext(XContentParser parser) { - return new QueryParseContext(searchRequestParsers.queryParsers, parser, parseFieldMatcher); + return new QueryParseContext(searchRequestParsers.queryParsers, parser, ParseFieldMatcher.STRICT); } public void testSerialization() throws IOException { @@ -392,7 +123,7 @@ public class SearchSourceBuilderTests extends ESTestCase { } //we use the streaming infra to create a copy of the builder provided as argument - protected SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException { + private SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { builder.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { @@ -610,7 +341,7 @@ public class SearchSourceBuilderTests extends ESTestCase { } } - private void createIndexBoost(SearchSourceBuilder searchSourceBuilder) { + private static void createIndexBoost(SearchSourceBuilder searchSourceBuilder) { int indexBoostSize = randomIntBetween(1, 10); for (int i = 0; i < indexBoostSize; i++) { searchSourceBuilder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10); diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java 
b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 99a2b438ffd..452b6b6ba3a 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -27,49 +27,13 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.SearchRequestTests; -import org.elasticsearch.search.fetch.FetchSubPhasePluginIT; -import org.elasticsearch.test.ESTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.elasticsearch.search.AbstractSearchTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import static java.util.Collections.emptyList; - -public class ShardSearchTransportRequestTests extends ESTestCase { - - private static NamedWriteableRegistry namedWriteableRegistry; - - @BeforeClass - public static void beforeClass() { - IndicesModule indicesModule = new IndicesModule(emptyList()) { - @Override - protected void configure() { - bindMapperExtension(); - } - }; - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, - Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())); - List entries = new ArrayList<>(); - entries.addAll(indicesModule.getNamedWriteables()); - entries.addAll(searchModule.getNamedWriteables()); - namedWriteableRegistry = new 
NamedWriteableRegistry(entries); - } - - @AfterClass - public static void afterClass() { - namedWriteableRegistry = null; - } +public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { public void testSerialization() throws Exception { ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest(); @@ -95,8 +59,8 @@ public class ShardSearchTransportRequestTests extends ESTestCase { } } - private static ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException { - SearchRequest searchRequest = SearchRequestTests.createSearchRequest(); + private ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException { + SearchRequest searchRequest = createSearchRequest(); ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt()); ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason")); From 20badcdbab187951e10f4cdf24525364c5590fa8 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 15 Sep 2016 20:28:12 +0200 Subject: [PATCH 20/25] [TEST] set back the visibility of our search ext plugin in FetchSubPhasePluginIT to private This plugin is not used in other tests anymore, it should be private to make sure its usage doesn't spread again --- .../search/fetch/FetchSubPhasePluginIT.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index 87965365fd1..02def44416e 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -118,7 +118,7 @@ public class FetchSubPhasePluginIT extends 
ESIntegTestCase { } } - public static final class TermVectorsFetchSubPhase implements FetchSubPhase { + private static final class TermVectorsFetchSubPhase implements FetchSubPhase { private static final String NAME = "term_vectors_fetch"; @Override @@ -153,7 +153,7 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase { } } - public static final class TermVectorsFetchParser implements SearchExtParser { + private static final class TermVectorsFetchParser implements SearchExtParser { private static final TermVectorsFetchParser INSTANCE = new TermVectorsFetchParser(); @@ -176,18 +176,18 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase { } } - public static final class TermVectorsFetchBuilder extends SearchExtBuilder { + private static final class TermVectorsFetchBuilder extends SearchExtBuilder { private final String field; - public TermVectorsFetchBuilder(String field) { + private TermVectorsFetchBuilder(String field) { this.field = field; } - public TermVectorsFetchBuilder(StreamInput in) throws IOException { + private TermVectorsFetchBuilder(StreamInput in) throws IOException { this.field = in.readString(); } - public String getField() { + private String getField() { return field; } From 7097f4943c03420d9a7332d043438e215d8ea52d Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 16 Sep 2016 11:11:24 +0200 Subject: [PATCH 21/25] [TEST] delete specific index boost serialization test, already covered by testSerialization indexBoost is already randomly set, we don't need a specific test for it in SearchSourceBuilderTests --- .../builder/SearchSourceBuilderTests.java | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 43ea81e993c..967af3d3afc 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -326,25 +326,4 @@ public class SearchSourceBuilderTests extends AbstractSearchTestCase { String query = "{ \"query\": {} }"; assertParseSearchSource(builder, new BytesArray(query), ParseFieldMatcher.EMPTY); } - - public void testSearchRequestBuilderSerializationWithIndexBoost() throws Exception { - SearchSourceBuilder searchSourceBuilder = createSearchSourceBuilder(); - createIndexBoost(searchSourceBuilder); - try (BytesStreamOutput output = new BytesStreamOutput()) { - searchSourceBuilder.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - SearchSourceBuilder deserializedSearchSourceBuilder = new SearchSourceBuilder(in); - BytesStreamOutput deserializedOutput = new BytesStreamOutput(); - deserializedSearchSourceBuilder.writeTo(deserializedOutput); - assertEquals(output.bytes(), deserializedOutput.bytes()); - } - } - } - - private static void createIndexBoost(SearchSourceBuilder searchSourceBuilder) { - int indexBoostSize = randomIntBetween(1, 10); - for (int i = 0; i < indexBoostSize; i++) { - searchSourceBuilder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10); - } - } } From f608e6c6cf06742b87f67263942e2c6a984f7c78 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Fri, 16 Sep 2016 16:06:18 -0400 Subject: [PATCH 22/25] Improves the documentation for the (#20531) `cluster.routing.allocation.cluster_concurrent_rebalance` setting, clarifying in which shard allocation situations the rebalance limit takes effect. 
Closes #20529 --- docs/reference/modules/cluster/shards_allocation.asciidoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index c5b21979030..48a77816a3b 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -85,7 +85,11 @@ Specify when shard rebalancing is allowed: `cluster.routing.allocation.cluster_concurrent_rebalance`:: Allow to control how many concurrent shard rebalances are - allowed cluster wide. Defaults to `2`. + allowed cluster wide. Defaults to `2`. Note that this setting + only controls the number of concurrent shard relocations due + to imbalances in the cluster. This setting does not limit shard + relocations due to <<allocation-filtering,allocation filtering>> + or <<forced-awareness,forced awareness>>. [float] === Shard Balancing Heuristics From c1e8b6a8ba60bc40701b752177bb23102407d6f7 Mon Sep 17 00:00:00 2001 From: Alexander Guz Date: Sat, 17 Sep 2016 22:58:29 +0200 Subject: [PATCH 23/25] Fixed aggregation by "gender" request example. `gender.keyword` should be used instead of just `gender` or we get an error `Fielddata is disabled on text fields by default. Set fielddata=true on [gender] in order to load fielddata in memory by uninverting the inverted index. 
Note that this can however use significant memory.` Closes #20535 --- docs/reference/getting-started.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index be563bdafd3..41404d6155c 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -1059,7 +1059,7 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' "aggs": { "group_by_gender": { "terms": { - "field": "gender" + "field": "gender.keyword" }, "aggs": { "average_balance": { From 135a19e7a159ef0e8ab37cb84a6dfa73baa99711 Mon Sep 17 00:00:00 2001 From: Alexander Guz Date: Sat, 17 Sep 2016 22:53:53 +0200 Subject: [PATCH 24/25] Added wildcards imports configuration for IntelliJ IDEA Closes #20534 --- CONTRIBUTING.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b0f1e054e46..da81436b8ad 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -120,7 +120,8 @@ Please follow these formatting guidelines: * The rest is left to Java coding standards * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do. * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them. - * Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value. 
+ * IntelliJ: `Preferences->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value. * Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so. To create a distribution from the source, simply run: From 18944898326cfea157341b99002a882799fb984f Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 16 Sep 2016 11:50:10 +0200 Subject: [PATCH 25/25] [DOC] Update /_cat/nodes doc closes #20162 --- docs/reference/cat/nodes.asciidoc | 69 +++++++++++++------------------ 1 file changed, 28 insertions(+), 41 deletions(-) diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index 288a28f3642..b0b152d4c50 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -3,48 +3,35 @@ The `nodes` command shows the cluster topology. -["source","sh",subs="attributes,callouts"] +[source,sh] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodes -SP4H 4727 192.168.56.30 9300 {version} {jdk} 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h mdi - Boneyard -_uhJ 5134 192.168.56.10 9300 {version} {jdk} 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h mdi * Athena -HfDp 4562 192.168.56.20 9300 {version} {jdk} 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h mdi - Zarek +% GET /_cat/nodes +192.168.56.30 9 78 22 1.80 2.05 2.51 mdi * bGG90GE +192.168.56.10 6 75 14 1.24 2.45 1.37 md - I8hydUG +192.168.56.20 5 71 12 1.07 1.05 1.11 di - H5dfFeA -------------------------------------------------- -The first few columns tell you where your nodes live. For sanity it -also tells you what version of ES and the JVM each one runs. 
- -["source","sh",subs="attributes,callouts"] --------------------------------------------------- -nodeId pid ip port version jdk -u2PZ 4234 192.168.56.30 9300 {version} {jdk} -URzf 5443 192.168.56.10 9300 {version} {jdk} -ActN 3806 192.168.56.20 9300 {version} {jdk} --------------------------------------------------- - - -The next few give a picture of your heap, memory, and load. +The first few columns tell you where your nodes live and give +a picture of your heap, memory, cpu and load. [source,sh] -------------------------------------------------- -diskAvail heapPercent heapMax ramPercent ramMax load - 72.1gb 31.3 93.9mb 81 239.1mb 0.24 - 72.1gb 19.6 93.9mb 82 239.1mb 0.05 - 72.2gb 64.9 93.9mb 84 239.1mb 0.12 +ip heap.percent ram.percent cpu load_1m load_5m load_15m +192.168.56.30 9 78 22 1.80 2.05 2.51 +192.168.56.10 6 75 14 1.24 2.45 1.37 +192.168.56.20 5 71 12 1.07 1.05 1.11 -------------------------------------------------- The last columns provide ancillary information that can often be useful when looking at the cluster as a whole, particularly large -ones. How many master-eligible nodes do I have? How many client -nodes? It looks like someone restarted a node recently; which one was -it? +ones. How many master-eligible nodes do I have? [source,sh] -------------------------------------------------- -uptime node.role master name - 3.5h di - Boneyard - 3.5h md * Athena - 3.5h i - Zarek +node.role master name +mdi * bGG90GE +md - I8hydUG +di - H5dfFeA -------------------------------------------------- [float] @@ -65,7 +52,7 @@ by default. To have the headers appear in the output, use verbose mode (`v`). The header name will match the supplied value (e.g., `pid` versus `p`). 
For example: -["source","sh",subs="attributes,callouts"] +[source,sh] -------------------------------------------------- % curl 192.168.56.10:9200/_cat/nodes?v&h=id,ip,port,v,m id ip port v m @@ -102,13 +89,15 @@ descriptors |123 descriptors percentage |1 |`file_desc.max` |`fdm`, `fileDescriptorMax` |No |Maximum number of file descriptors |1024 -|`load` |`l` |No |Most recent load average |0.22 |`cpu` | |No |Recent system CPU usage as percent |12 +|`load_1m` |`l` |No |Most recent load average |0.22 +|`load_5m` |`l` |No |Load average for the last five minutes |0.78 +|`load_15m` |`l` |No |Load average for the last fifteen minutes |1.24 |`uptime` |`u` |No |Node uptime |17.3m |`node.role` |`r`, `role`, `nodeRole` |Yes |Master eligible node (m); Data node (d); Ingest node (i); Coordinating node only (-) |mdi |`master` |`m` |Yes |Elected master (*); Not elected master (-) |* -|`name` |`n` |Yes |Node name |Venom +|`name` |`n` |Yes |Node name |I8hydUG |`completion.size` |`cs`, `completionSize` |No |Size of completion |0b |`fielddata.memory_size` |`fm`, `fielddataMemory` |No |Used fielddata cache memory |0b @@ -152,6 +141,8 @@ of current indexing operations |0 indexing |134ms |`indexing.index_total` |`iito`, `indexingIndexTotal` |No |Number of indexing operations |1 +|`indexing.index_failed` |`iif`, `indexingIndexFailed` |No |Number of +failed indexing operations |0 |`merges.current` |`mc`, `mergesCurrent` |No |Number of current merge operations |0 |`merges.current_docs` |`mcd`, `mergesCurrentDocs` |No |Number of @@ -166,15 +157,6 @@ documents |0 merges |0b |`merges.total_time` |`mtt`, `mergesTotalTime` |No |Time spent merging documents |0s -|`percolate.current` |`pc`, `percolateCurrent` |No |Number of current -percolations |0 -|`percolate.memory_size` |`pm`, `percolateMemory` |No |Memory used by -current percolations |0b -|`percolate.queries` |`pq`, `percolateQueries` |No |Number of -registered percolation queries |0 -|`percolate.time` |`pti`, `percolateTime` |No |Time 
spent -percolating |0s -|`percolate.total` |`pto`, `percolateTotal` |No |Total percolations |0 |`refresh.total` |`rto`, `refreshTotal` |No |Number of refreshes |16 |`refresh.time` |`rti`, `refreshTime` |No |Time spent in refreshes |91ms |`script.compilations` |`scrcc`, `scriptCompilations` |No |Total script compilations |17 @@ -203,4 +185,9 @@ segments |1.4kb |Memory used by index writer |18mb |`segments.version_map_memory` |`svmm`, `segmentsVersionMapMemory` |No |Memory used by version map |1.0kb +|`segments.fixed_bitset_memory` |`sfbm`, `fixedBitsetMemory` |No +|Memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields |1.0kb +|`suggest.current` |`suc`, `suggestCurrent` |No |Number of current suggest operations |0 +|`suggest.time` |`suti`, `suggestTime` |No |Time spent in suggest |0 +|`suggest.total` |`suto`, `suggestTotal` |No |Number of suggest operations |0 |=======================================================================