From 72ee65f914099f83f30c46dd35ccc4836ef3f50c Mon Sep 17 00:00:00 2001
From: David Pilato
Date: Fri, 16 Dec 2016 18:11:55 +0100
Subject: [PATCH 001/119] Add documentation for Delete By Query Java API

Closes #22114
---
 docs/java-api/docs.asciidoc        |  1 +
 docs/java-api/docs/delete.asciidoc | 45 ++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+)

diff --git a/docs/java-api/docs.asciidoc b/docs/java-api/docs.asciidoc
index 9ace406ee42..c355714bdd6 100644
--- a/docs/java-api/docs.asciidoc
+++ b/docs/java-api/docs.asciidoc
@@ -7,6 +7,7 @@ This section describes the following CRUD APIs:
 * <>
 * <>
 * <>
+* <>
 * <>

 .Multi-document APIs

diff --git a/docs/java-api/docs/delete.asciidoc b/docs/java-api/docs/delete.asciidoc
index b9efd01242d..392132aa129 100644
--- a/docs/java-api/docs/delete.asciidoc
+++ b/docs/java-api/docs/delete.asciidoc
@@ -35,3 +35,48 @@ DeleteResponse response = client.prepareDelete("twitter", "tweet", "1")
         .setOperationThreaded(false)
         .get();
 --------------------------------------------------
+
+[[java-docs-delete-by-query]]
+=== Delete By Query API
+
+The delete by query API allows one to delete a given set of documents based on
+the result of a query:
+
+[source,java]
+--------------------------------------------------
+BulkIndexByScrollResponse response =
+    DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
+        .filter(QueryBuilders.matchQuery("gender", "male")) <1>
+        .source("persons")                                  <2>
+        .get();                                             <3>
+
+long deleted = response.getDeleted();                       <4>
+--------------------------------------------------
+<1> query
+<2> index
+<3> execute the operation
+<4> number of deleted documents
+
+As it can be a long-running operation, if you wish to do it asynchronously, you can call `execute` instead of `get`
+and provide a listener like:
+
+[source,java]
+--------------------------------------------------
+DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
+    .filter(QueryBuilders.matchQuery("gender", "male"))        <1>
+    .source("persons")                                         <2>
+    .execute(new ActionListener<BulkIndexByScrollResponse>() { <3>
+        @Override
+        public void onResponse(BulkIndexByScrollResponse response) {
+            long deleted = response.getDeleted();              <4>
+        }
+        @Override
+        public void onFailure(Exception e) {
+            // Handle the exception
+        }
+    });
+--------------------------------------------------
+<1> query
+<2> index
+<3> listener
+<4> number of deleted documents

From 80843afb192f5a79a7e1fd140a708d71742197d1 Mon Sep 17 00:00:00 2001
From: David Pilato
Date: Wed, 21 Dec 2016 12:13:16 +0100
Subject: [PATCH 002/119] Adds more information about ingest attachment
 properties extraction

This is coming from this thread on discuss:
https://discuss.elastic.co/t/ingest-attachment-plugin-exception/69167/10
---
 docs/plugins/ingest-attachment.asciidoc | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc
index 44ff41a6dfb..38db27ca42c 100644
--- a/docs/plugins/ingest-attachment.asciidoc
+++ b/docs/plugins/ingest-attachment.asciidoc
@@ -52,7 +52,7 @@ The node must be stopped before removing the plugin.
 | `field`         | yes      | -          | The field to get the base64 encoded field from
 | `target_field`  | no       | attachment | The field that will hold the attachment information
 | `indexed_chars` | no       | 100000     | The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit.
-| `properties`    | no       | all        | Properties to select to be stored.
Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` +| `properties` | no | all properties | Properties to select to be stored. Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== @@ -102,6 +102,25 @@ Returns this: -------------------------------------------------- // TESTRESPONSE + +To specify only some fields to be extracted: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/attachment +{ + "description" : "Extract attachment information", + "processors" : [ + { + "attachment" : { + "field" : "data", + "properties": [ "content", "title" ] + } + } + ] +} +-------------------------------------------------- + NOTE: Extracting contents from binary data is a resource intensive operation and consumes a lot of resources. It is highly recommended to run pipelines using this processor in a dedicated ingest node. From 6d96cdb87ce3d5883a097f01e52ca30965db645f Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 21 Dec 2016 12:46:01 +0100 Subject: [PATCH 003/119] Add // CONSOLE to the snippet --- docs/plugins/ingest-attachment.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 38db27ca42c..642058b2f23 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -120,6 +120,7 @@ PUT _ingest/pipeline/attachment ] } -------------------------------------------------- +// CONSOLE NOTE: Extracting contents from binary data is a resource intensive operation and consumes a lot of resources. It is highly recommended to run pipelines From d404ee35333a9aa16a96c9d1c991bbee17f95005 Mon Sep 17 00:00:00 2001 From: gameldar Date: Thu, 22 Dec 2016 00:18:33 +0800 Subject: [PATCH 004/119] Add ingest-attachment-with-arrays section to ingest attachments doc Added a new section detailing how to use the attachment processor within an array. This reverts commit #22296 and instead links to the foreach processor. --- docs/plugins/ingest-attachment.asciidoc | 52 +++++++++++++++++++++- docs/reference/ingest/ingest-node.asciidoc | 4 ++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 44ff41a6dfb..1471fbdae0d 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -106,4 +106,54 @@ NOTE: Extracting contents from binary data is a resource intensive operation and consumes a lot of resources. It is highly recommended to run pipelines using this processor in a dedicated ingest node. -NOTE: To process an array of attachments the {ref}/foreach-processor.html[foreach processor] is required. +[[ingest-attachment-with-arrays]] +==== Using the Attachment Processor with arrays + +To use the attachment processor within an array of attachments the +{ref}/foreach-processor.html[foreach processor] is required. This +enables the attachment processor to be run on the individual elements +of the array. 
+
+For example, given the following source:
+
+[source,js]
+--------------------------------------------------
+{
+  "attachments" : [
+    {
+      "filename" : "ipsum.txt",
+      "data" : "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo="
+    },
+    {
+      "filename" : "test.txt",
+      "data" : "VGhpcyBpcyBhIHRlc3QK"
+    }
+  ]
+}
+--------------------------------------------------
+
+In this case, we want to process the data field in each element
+of the attachments field and insert
+the properties into the document so the following `foreach`
+processor is used:
+
+[source,js]
+--------------------------------------------------
+{
+  "foreach": {
+    "field": "attachments",
+    "processor": {
+      "attachment": {
+        "target_field": "_ingest._value.attachment",
+        "field": "_ingest._value.data"
+      }
+    }
+  }
+}
+--------------------------------------------------
+Note that the `target_field` needs to be set, otherwise the
+default value is used, which is the top-level field `attachment`. The
+properties on that top-level field would contain the values of the
+first attachment only. However, by setting the
+`target_field` to a value under `_ingest._value`, the properties are
+correctly associated with their corresponding attachment.
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index 8b6a4478115..c34904e92fd 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -1166,6 +1166,10 @@ In this example, if the `remove` processor does fail, then
 the array elements that have been processed thus far will
 be updated.
+
+Another advanced example can be found in {plugins}/using-ingest-attachment.html[attachment processor documentation].
+
+
 [[grok-processor]]
 === Grok Processor

From e3eb36388221649935a1f3e722103013f111c309 Mon Sep 17 00:00:00 2001
From: Gameldar
Date: Thu, 22 Dec 2016 20:52:08 +0800
Subject: [PATCH 005/119] Link directly to the attachments in arrays section

The link should be made to the relevant section of the ingest
attachments documentation, rather than the top of the page.
---
 docs/reference/ingest/ingest-node.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index c34904e92fd..d6acabd87f1 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -1166,7 +1166,7 @@ In this example, if the `remove` processor does fail, then
 the array elements that have been processed thus far will
 be updated.

-Another advanced example can be found in {plugins}/using-ingest-attachment.html[attachment processor documentation].
+Another advanced example can be found in the {plugins}/ingest-attachment-with-arrays.html[attachment processor documentation].

From faaa671fb624651c9d92b38c8de62ea552ba15ec Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Thu, 22 Dec 2016 20:08:02 -0500
Subject: [PATCH 006/119] Enable assertions in integration tests

When starting a standalone cluster, we do not enable assertions. This is
problematic because it means that we miss opportunities to catch bugs.
This commit enables assertions for standalone integration tests, and
fixes a couple of bugs that were uncovered by enabling these.
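As a minimal illustration of why this matters (a hypothetical snippet, not
part of this change): `assert` statements are compiled in but only evaluated
when the JVM is started with `-ea`, so a cluster launched without the flag
silently skips every invariant check.

    public class AssertDemo {
        public static void main(String[] args) {
            // Evaluated only when the JVM is launched with -ea; without
            // the flag this check never runs and the bug goes unnoticed.
            assert args.length > 0 : "expected at least one argument";
            System.out.println("ok");
        }
    }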
Relates #22334
---
 .../gradle/test/ClusterConfiguration.groovy        |  8 +++++---
 .../elasticsearch/index/engine/InternalEngine.java | 11 +++++------
 .../org/elasticsearch/rest/BytesRestResponse.java  |  7 ++++---
 .../action/admin/indices/RestGetMappingAction.java |  2 ++
 4 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index ca4957f7a6c..57adaa2576d 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -72,9 +72,11 @@ class ClusterConfiguration {
     boolean useMinimumMasterNodes = true

     @Input
-    String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
-        " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
-        " " + System.getProperty('tests.jvm.argline', '')
+    String jvmArgs = "-ea" +
+        " " + "-Xms" + System.getProperty('tests.heap.size', '512m') +
+        " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
+        " " + System.getProperty('tests.jvm.argline', '')
+

     /**
      * A closure to call which returns the unicast host to connect to for cluster formation.

diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index e142ac0f6f4..058ed0a19fc 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -592,15 +592,14 @@ public class InternalEngine extends Engine {
     private boolean assertSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
         if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) &&
             origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
             // legacy support
-            assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no." +
-                " index version: " + engineConfig.getIndexSettings().getIndexVersionCreated() + ". seq no: " + seqNo;
+            assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no.;" +
+                " index version: " + engineConfig.getIndexSettings().getIndexVersionCreated() + ", seqNo: " + seqNo;
         } else if (origin == Operation.Origin.PRIMARY) {
             // sequence number should not be set when operation origin is primary
-            assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "primary ops should never have an assigned seq no. got: " + seqNo;
-        } else {
+            assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "primary ops should never have an assigned seq no.; seqNo: " + seqNo;
+        } else if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
             // sequence number should be set when operation origin is not primary
-            assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no. origin: " + origin +
-                " index version: " + engineConfig.getIndexSettings().getIndexVersionCreated();
+            assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin;
         }
         return true;
     }

diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
index 7af8249bf2e..ba952e23c23 100644
--- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
+++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java
@@ -89,9 +89,10 @@ public class BytesRestResponse extends RestResponse {
             this.content = BytesArray.EMPTY;
             this.contentType = TEXT_CONTENT_TYPE;
         } else {
-            XContentBuilder builder = convert(channel, status, e);
-            this.content = builder.bytes();
-            this.contentType = builder.contentType().mediaType();
+            try (final XContentBuilder builder = convert(channel, status, e)) {
+                this.content = builder.bytes();
+                this.contentType = builder.contentType().mediaType();
+            }
         }
         if (e instanceof ElasticsearchException) {
             copyHeaders(((ElasticsearchException) e));

diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
index 684a54b7f7e..69bbe47ecd4 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
@@ -72,8 +72,10 @@ public class RestGetMappingAction extends BaseRestHandler {
                 if (indices.length != 0 && types.length != 0) {
                     return new BytesRestResponse(OK, builder.startObject().endObject());
                 } else if (indices.length != 0) {
+                    builder.close();
                     return new BytesRestResponse(channel, new IndexNotFoundException(indices[0]));
                 } else if (types.length != 0) {
+                    builder.close();
                     return new BytesRestResponse(channel, new TypeMissingException("_all", types[0]));
                 } else {
                     return new BytesRestResponse(OK, builder.startObject().endObject());

From b100f1850564cdc22bbac34660a6bc0039eb57cd Mon Sep 17 00:00:00 2001
From: gameldar
Date: Fri, 23 Dec 2016 13:48:44 +0800
Subject: [PATCH 007/119] Fix the ingest attachment array examples

Fix up the ingest attachment array handling examples so they are full
examples and validated by the build system correctly.
--- docs/plugins/ingest-attachment.asciidoc | 73 +++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 6 deletions(-) diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 1471fbdae0d..3aae6c0e146 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -131,6 +131,7 @@ For example, given the following source: ] } -------------------------------------------------- +// NOTCONSOLE In this case, we want to process the data field in each element of the attachments field and insert @@ -139,18 +140,78 @@ processor is used: [source,js] -------------------------------------------------- +PUT _ingest/pipeline/attachment { - "foreach": { - "field": "attachments", - "processor": { - "attachment": { - "target_field": "_ingest._value.attachment", - "field": "_ingest._value.data" + "description" : "Extract attachment information from arrays", + "processors" : [ + { + "foreach": { + "field": "attachments", + "processor": { + "attachment": { + "target_field": "_ingest._value.attachment", + "field": "_ingest._value.data" + } + } } } + ] +} +PUT my_index/my_type/my_id?pipeline=attachment +{ + "attachments" : [ + { + "filename" : "ipsum.txt", + "data" : "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo=" + }, + { + "filename" : "test.txt", + "data" : "VGhpcyBpcyBhIHRlc3QK" + } + ] +} +GET my_index/my_type/my_id +-------------------------------------------------- +// CONSOLE + +Returns this: +[source,js] +-------------------------------------------------- +{ + "_index" : "my_index", + "_type" : "my_type", + "_id" : "my_id", + "_version" : 1, + "found" : true, + "_source" : { + "attachments" : [ + { + "filename" : "ipsum.txt", + "data" : "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo=", + "attachment" : { + "content_type" : "text/plain; charset=ISO-8859-1", + "language" : "en", + "content" : "this is\njust some text", + "content_length" : 24 + } + }, + { + "filename" : "test.txt", + "data" : "VGhpcyBpcyBhIHRlc3QK", + "attachment" : { + "content_type" : "text/plain; charset=ISO-8859-1", + "language" : "en", + "content" : "This is a test", + "content_length" : 16 + } + } + ] } } -------------------------------------------------- +// TESTRESPONSE + + Note that the `target_field` needs to be set, otherwise the default value is used which is a top level field `attachment`. The properties on this top level field will contain the value of the From 215874aff3af36e24eb134e170a50d2990ad56d6 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Fri, 23 Dec 2016 09:03:43 +0100 Subject: [PATCH 008/119] process TestLogging annotation value in prefix-first order We have to sort the logger names so they wouldn't override each other. 
Processing org.elasticsearch:DEBUG after org.elasticsearch.transport:TRACE
resets the setting of the latter
---
 .../test/junit/listeners/LoggingListener.java | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
index 0009c21d6aa..88be2be8cfb 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -28,6 +28,7 @@ import org.junit.runner.notification.RunListener;

 import java.util.HashMap;
 import java.util.Map;
+import java.util.TreeMap;

 /**
  * A {@link RunListener} that allows to change the log level for a specific test method.
@@ -81,8 +82,13 @@ public class LoggingListener extends RunListener {
         if (map == null) {
             return null;
         }
+        // sort the logging keys so they wouldn't override each other.
+        // for example, processing org.elasticsearch:DEBUG after org.elasticsearch.transport:TRACE
+        // will reset the latter
+        TreeMap<String, String> sortedLogNames = new TreeMap<>(String::compareTo);
+        sortedLogNames.putAll(map);
         Map<String, String> previousValues = new HashMap<>();
-        for (Map.Entry<String, String> entry : map.entrySet()) {
+        for (Map.Entry<String, String> entry : sortedLogNames.entrySet()) {
             Logger logger = resolveLogger(entry.getKey());
             previousValues.put(entry.getKey(), logger.getLevel().toString());
             Loggers.setLevel(logger, entry.getValue());

From eb7450bcdc6a1f2e5c8364a2e21d9a8348742dde Mon Sep 17 00:00:00 2001
From: Boaz Leskes
Date: Fri, 23 Dec 2016 09:15:11 +0100
Subject: [PATCH 009/119] UnicastZenPing add trace logging on connection
 opening

---
 .../java/org/elasticsearch/discovery/zen/UnicastZenPing.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java
index 4e2ed19f422..6658913cce6 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java
@@ -387,6 +387,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
                 if (result == null) {
                     ensureOpen();
                     boolean success = false;
+                    logger.trace("[{}] opening connection to [{}]", id(), node);
                     result = transportService.openConnection(node, connectionProfile);
                     try {
                         transportService.handshake(result, connectionProfile.getHandshakeTimeout().millis());
@@ -399,6 +400,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
                         }
                     } finally {
                         if (success == false) {
+                            logger.trace("[{}] closing connection to [{}] due to failure", id(), node);
                             IOUtils.closeWhileHandlingException(result);
                         }
                     }

From 70594a66c7b30c8bcf95b125cb803f8034cfa9ad Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Fri, 23 Dec 2016 09:37:44 +0100
Subject: [PATCH 010/119] Only run the unmapped+missing tests on 5.2+.
--- .../test/search.aggregation/20_terms.yaml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index 35febfa28da..769865bd0e3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -469,6 +469,10 @@ setup: --- "Unmapped strings": + - skip: + version: " - 5.1.99" + reason: Unmapped fields handling with value_type was added in 5.2 + - do: index: index: test_1 @@ -494,6 +498,10 @@ setup: --- "Unmapped booleans": + - skip: + version: " - 5.1.99" + reason: Unmapped fields handling with value_type was added in 5.2 + - do: index: index: test_1 @@ -521,6 +529,10 @@ setup: --- "Unmapped dates": + - skip: + version: " - 5.1.99" + reason: Unmapped fields handling with value_type was added in 5.2 + - do: index: index: test_1 @@ -548,6 +560,10 @@ setup: --- "Unmapped longs": + - skip: + version: " - 5.1.99" + reason: Unmapped fields handling with value_type was added in 5.2 + - do: index: index: test_1 @@ -573,6 +589,10 @@ setup: --- "Unmapped doubles": + - skip: + version: " - 5.1.99" + reason: Unmapped fields handling with value_type was added in 5.2 + - do: index: index: test_1 From e1b6166f21eef2177efe2943efd90392e95c441b Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 23 Dec 2016 11:28:12 +0100 Subject: [PATCH 011/119] Makes more obvious that we expect an array --- docs/plugins/ingest-attachment.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 642058b2f23..3a628803501 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -52,7 +52,7 @@ The node must be stopped before removing the plugin. | `field` | yes | - | The field to get the base64 encoded field from | `target_field` | no | attachment | The field that will hold the attachment information | `indexed_chars` | no | 100000 | The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit. -| `properties` | no | all properties | Properties to select to be stored. Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` +| `properties` | no | all properties | Array of properties to select to be stored. Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== From baea17b53fbdb7eae92607ed18f3d18110849712 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 23 Dec 2016 12:23:52 +0100 Subject: [PATCH 012/119] Separate cluster update tasks that are published from those that are not (#21912) This commit factors out the cluster state update tasks that are published (ClusterStateUpdateTask) from those that are not (LocalClusterUpdateTask), serving as a basis for future refactorings to separate the publishing mechanism out of ClusterService. 
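A hedged sketch of the resulting usage for the non-published side (the names
come from this change; the task body is illustrative, not part of the patch):

    // Runs on any node, master or not; the result is applied locally and
    // never published, so returning unchanged() is a cheap way to schedule
    // work on the cluster state applier thread.
    clusterService.submitStateUpdateTask("example-local-task", new LocalClusterUpdateTask() {
        @Override
        public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
            return unchanged();
        }

        @Override
        public void onFailure(String source, Exception e) {
            // nothing was published, so log and move on
        }
    });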
--- .../health/TransportClusterHealthAction.java | 73 ++++++---- .../cluster/ClusterStateTaskExecutor.java | 23 +++- .../cluster/ClusterStateUpdateTask.java | 15 ++- .../cluster/LocalClusterUpdateTask.java | 93 +++++++++++++ .../action/shard/ShardStateAction.java | 10 +- .../metadata/MetaDataMappingService.java | 16 +-- .../cluster/service/ClusterService.java | 116 ++++++++++------ .../discovery/zen/NodeJoinController.java | 16 +-- .../discovery/zen/ZenDiscovery.java | 125 ++++++------------ .../indices/store/IndicesStore.java | 15 +-- .../java/org/elasticsearch/node/Node.java | 1 + .../snapshots/RestoreService.java | 4 +- .../org/elasticsearch/tribe/TribeService.java | 14 +- ...rdFailedClusterStateTaskExecutorTests.java | 14 +- .../health/ClusterStateHealthTests.java | 14 +- .../cluster/service/ClusterServiceTests.java | 70 +++------- .../zen/NodeJoinControllerTests.java | 3 +- ...eRemovalClusterStateTaskExecutorTests.java | 7 +- .../store/IndicesStoreIntegrationIT.java | 15 +-- .../java/org/elasticsearch/tribe/TribeIT.java | 8 +- .../test/ClusterServiceUtils.java | 17 +-- .../BlockClusterStateProcessing.java | 13 +- .../SlowClusterStateProcessing.java | 13 +- 23 files changed, 381 insertions(+), 314 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 6f2a72c5ba4..44c604dc8b8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -85,37 +86,55 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { if (request.waitForEvents() != null) { final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } + if (request.local()) { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new LocalClusterUpdateTask(request.waitForEvents()) { + @Override + public ClusterTasksResult execute(ClusterState currentState) { + return unchanged(); + } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth(request, listener); - } + 
@Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, listener); + } - @Override - public void onNoLongerMaster(String source) { - logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); - doExecute(task, request, listener); - } + @Override + public void onFailure(String source, Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); + } else { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, listener); + } - @Override - public boolean runOnlyOnMaster() { - return !request.local(); - } - }); + @Override + public void onNoLongerMaster(String source) { + logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); + doExecute(task, request, listener); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); + } } else { executeHealth(request, listener); } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index e5493eaa955..3693447cfb6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.cluster; +import org.elasticsearch.common.Nullable; + import java.util.IdentityHashMap; import java.util.List; import java.util.Map; @@ -27,10 +29,10 @@ public interface ClusterStateTaskExecutor { * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state * should be changed. 
*/ - BatchResult execute(ClusterState currentState, List tasks) throws Exception; + ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception; /** - * indicates whether this task should only run if current node is master + * indicates whether this executor should only run if the current node is master */ default boolean runOnlyOnMaster() { return true; @@ -68,18 +70,22 @@ public interface ClusterStateTaskExecutor { * Represents the result of a batched execution of cluster state update tasks * @param the type of the cluster state update task */ - class BatchResult { + class ClusterTasksResult { + public final boolean noMaster; + @Nullable public final ClusterState resultingState; public final Map executionResults; /** * Construct an execution result instance with a correspondence between the tasks and their execution result + * @param noMaster whether this node steps down as master or has lost connection to the master * @param resultingState the resulting cluster state * @param executionResults the correspondence between tasks and their outcome */ - BatchResult(ClusterState resultingState, Map executionResults) { + ClusterTasksResult(boolean noMaster, ClusterState resultingState, Map executionResults) { this.resultingState = resultingState; this.executionResults = executionResults; + this.noMaster = noMaster; } public static Builder builder() { @@ -117,8 +123,13 @@ public interface ClusterStateTaskExecutor { return this; } - public BatchResult build(ClusterState resultingState) { - return new BatchResult<>(resultingState, executionResults); + public ClusterTasksResult build(ClusterState resultingState) { + return new ClusterTasksResult<>(false, resultingState, executionResults); + } + + ClusterTasksResult build(ClusterTasksResult result, ClusterState previousState) { + return new ClusterTasksResult<>(result.noMaster, result.resultingState == null ? previousState : result.resultingState, + executionResults); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index a679d098616..b298e7e915d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -28,7 +28,7 @@ import java.util.List; /** * A task that can update the cluster state. */ -public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { +public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { private final Priority priority; @@ -41,9 +41,9 @@ public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, } @Override - public final BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public final ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState result = execute(currentState); - return BatchResult.builder().successes(tasks).build(result); + return ClusterTasksResult.builder().successes(tasks).build(result); } @Override @@ -75,4 +75,13 @@ public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, public Priority priority() { return priority; } + + /** + * Marked as final as cluster state update tasks should only run on master. + * For local requests, use {@link LocalClusterUpdateTask} instead. 
+ */ + @Override + public final boolean runOnlyOnMaster() { + return true; + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java new file mode 100644 index 00000000000..9692ff8d4e1 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/LocalClusterUpdateTask.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.List; + +/** + * Used to apply state updates on nodes that are not necessarily master + */ +public abstract class LocalClusterUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, + ClusterStateTaskListener { + + private final Priority priority; + + public LocalClusterUpdateTask() { + this(Priority.NORMAL); + } + + public LocalClusterUpdateTask(Priority priority) { + this.priority = priority; + } + + public abstract ClusterTasksResult execute(ClusterState currentState) throws Exception; + + @Override + public final ClusterTasksResult execute(ClusterState currentState, + List tasks) throws Exception { + assert tasks.size() == 1 && tasks.get(0) == this : "expected one-element task list containing current object but was " + tasks; + ClusterTasksResult result = execute(currentState); + return ClusterTasksResult.builder().successes(tasks).build(result, currentState); + } + + /** + * node stepped down as master or has lost connection to the master + */ + public static ClusterTasksResult noMaster() { + return new ClusterTasksResult(true, null, null); + } + + /** + * no changes were made to the cluster state. 
Useful to execute a runnable on the cluster state applier thread + */ + public static ClusterTasksResult unchanged() { + return new ClusterTasksResult(false, null, null); + } + + /** + * locally apply cluster state received from a master + */ + public static ClusterTasksResult newState(ClusterState clusterState) { + return new ClusterTasksResult(false, clusterState, null); + } + + @Override + public String describeTasks(List tasks) { + return ""; // one of task, source is enough + } + + @Nullable + public TimeValue timeout() { + return null; + } + + @Override + public Priority priority() { + return priority; + } + + @Override + public final boolean runOnlyOnMaster() { + return false; + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 14b028042a0..411aabbf843 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -25,10 +25,10 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateTaskConfig; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NotMasterException; @@ -260,8 +260,8 @@ public class ShardStateAction extends AbstractComponent { } @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - BatchResult.Builder batchResultBuilder = BatchResult.builder(); + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { + ClusterTasksResult.Builder batchResultBuilder = ClusterTasksResult.builder(); List tasksToBeApplied = new ArrayList<>(); List failedShardsToBeApplied = new ArrayList<>(); List staleShardsToBeApplied = new ArrayList<>(); @@ -394,8 +394,8 @@ public class ShardStateAction extends AbstractComponent { } @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - BatchResult.Builder builder = BatchResult.builder(); + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { + ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); List tasksToBeApplied = new ArrayList<>(); List shardRoutingsToBeApplied = new ArrayList<>(tasks.size()); Set seenShardRoutings = new HashSet<>(); // to prevent duplicates diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 8defc5c7c47..c0032a4b6a4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -26,9 +26,9 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateTaskListener; +import 
org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -64,8 +64,8 @@ public class MetaDataMappingService extends AbstractComponent { private final ClusterService clusterService; private final IndicesService indicesService; - final ClusterStateTaskExecutor refreshExecutor = new RefreshTaskExecutor(); - final ClusterStateTaskExecutor putMappingExecutor = new PutMappingExecutor(); + final RefreshTaskExecutor refreshExecutor = new RefreshTaskExecutor(); + final PutMappingExecutor putMappingExecutor = new PutMappingExecutor(); @Inject @@ -92,9 +92,9 @@ public class MetaDataMappingService extends AbstractComponent { class RefreshTaskExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState newClusterState = executeRefresh(currentState, tasks); - return BatchResult.builder().successes(tasks).build(newClusterState); + return ClusterTasksResult.builder().successes(tasks).build(newClusterState); } } @@ -211,10 +211,10 @@ public class MetaDataMappingService extends AbstractComponent { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, - List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, + List tasks) throws Exception { Map indexMapperServices = new HashMap<>(); - BatchResult.Builder builder = BatchResult.builder(); + ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); try { for (PutMappingClusterStateUpdateRequest request : tasks) { try { diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index d213cea4d33..d4fff64530e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -31,9 +31,8 @@ import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.ClusterStateTaskExecutor.BatchResult; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.TimeoutClusterStateListener; @@ -64,6 +63,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -137,6 +137,8 @@ public class ClusterService extends AbstractLifecycleComponent { 
private NodeConnectionsService nodeConnectionsService; + private DiscoverySettings discoverySettings; + public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { super(settings); @@ -214,6 +216,7 @@ public class ClusterService extends AbstractLifecycleComponent { Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); Objects.requireNonNull(state().nodes().getLocalNode(), "please set the local node before starting"); Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting"); + Objects.requireNonNull(discoverySettings, "please set discovery settings before starting"); addListener(localNodeMasterListeners); updateState(state -> ClusterState.builder(state).blocks(initialBlocks).build()); this.threadPoolExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), @@ -379,11 +382,11 @@ public class ClusterService extends AbstractLifecycleComponent { * task * */ - public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) { + public & ClusterStateTaskListener> void submitStateUpdateTask( + final String source, final T updateTask) { submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask); } - /** * Submits a cluster state update task; submitted updates will be * batched across the same instance of executor. The exact batching @@ -573,6 +576,10 @@ public class ClusterService extends AbstractLifecycleComponent { return clusterName; } + public void setDiscoverySettings(DiscoverySettings discoverySettings) { + this.discoverySettings = discoverySettings; + } + abstract static class SourcePrioritizedRunnable extends PrioritizedRunnable { protected final String source; @@ -643,29 +650,28 @@ public class ClusterService extends AbstractLifecycleComponent { } public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, long startTimeNS) { - BatchResult batchResult = executeTasks(taskInputs, startTimeNS, previousClusterState); - ClusterState newClusterState = batchResult.resultingState; + ClusterTasksResult clusterTasksResult = executeTasks(taskInputs, startTimeNS, previousClusterState); // extract those that are waiting for results List nonFailedTasks = new ArrayList<>(); for (UpdateTask updateTask : taskInputs.updateTasks) { - assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask; + assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask; final ClusterStateTaskExecutor.TaskResult taskResult = - batchResult.executionResults.get(updateTask.task); + clusterTasksResult.executionResults.get(updateTask.task); if (taskResult.isSuccess()) { nonFailedTasks.add(updateTask); } } - newClusterState = patchVersions(previousClusterState, newClusterState); + ClusterState newClusterState = patchVersionsAndNoMasterBlocks(previousClusterState, clusterTasksResult); return new TaskOutputs(taskInputs, previousClusterState, newClusterState, nonFailedTasks, - batchResult.executionResults); + clusterTasksResult.executionResults); } - private BatchResult executeTasks(TaskInputs taskInputs, long startTimeNS, ClusterState previousClusterState) { - BatchResult batchResult; + private ClusterTasksResult executeTasks(TaskInputs taskInputs, long startTimeNS, ClusterState previousClusterState) { + ClusterTasksResult clusterTasksResult; try { List inputs = 
taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); - batchResult = taskInputs.executor.execute(previousClusterState, inputs); + clusterTasksResult = taskInputs.executor.execute(previousClusterState, inputs); } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { @@ -681,42 +687,70 @@ public class ClusterService extends AbstractLifecycleComponent { e); } warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary); - batchResult = BatchResult.builder() + clusterTasksResult = ClusterTasksResult.builder() .failures(taskInputs.updateTasks.stream().map(updateTask -> updateTask.task)::iterator, e) .build(previousClusterState); } - assert batchResult.executionResults != null; - assert batchResult.executionResults.size() == taskInputs.updateTasks.size() + assert clusterTasksResult.executionResults != null; + assert clusterTasksResult.executionResults.size() == taskInputs.updateTasks.size() : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", taskInputs.updateTasks.size(), - taskInputs.updateTasks.size() == 1 ? "" : "s", batchResult.executionResults.size()); + taskInputs.updateTasks.size() == 1 ? "" : "s", clusterTasksResult.executionResults.size()); boolean assertsEnabled = false; assert (assertsEnabled = true); if (assertsEnabled) { for (UpdateTask updateTask : taskInputs.updateTasks) { - assert batchResult.executionResults.containsKey(updateTask.task) : + assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask; } } - return batchResult; + return clusterTasksResult; } - private ClusterState patchVersions(ClusterState previousClusterState, ClusterState newClusterState) { - if (previousClusterState != newClusterState) { - if (newClusterState.nodes().isLocalNodeElectedMaster()) { - // only the master controls the version numbers - Builder builder = ClusterState.builder(newClusterState).incrementVersion(); - if (previousClusterState.routingTable() != newClusterState.routingTable()) { - builder.routingTable(RoutingTable.builder(newClusterState.routingTable()) - .version(newClusterState.routingTable().version() + 1).build()); - } - if (previousClusterState.metaData() != newClusterState.metaData()) { - builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); - } - newClusterState = builder.build(); + private ClusterState patchVersionsAndNoMasterBlocks(ClusterState previousClusterState, ClusterTasksResult executionResult) { + ClusterState newClusterState = executionResult.resultingState; + + if (executionResult.noMaster) { + assert newClusterState == previousClusterState : "state can only be changed by ClusterService when noMaster = true"; + if (previousClusterState.nodes().getMasterNodeId() != null) { + // remove block if it already exists before adding new one + assert previousClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock().id()) == false : + "NO_MASTER_BLOCK should only be added by ClusterService"; + ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(previousClusterState.blocks()) + .addGlobalBlock(discoverySettings.getNoMasterBlock()) + .build(); + + DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(previousClusterState.nodes()).masterNodeId(null).build(); + newClusterState = ClusterState.builder(previousClusterState) + .blocks(clusterBlocks) + 
.nodes(discoveryNodes) + .build(); } + } else if (newClusterState.nodes().isLocalNodeElectedMaster() && previousClusterState != newClusterState) { + // only the master controls the version numbers + Builder builder = ClusterState.builder(newClusterState).incrementVersion(); + if (previousClusterState.routingTable() != newClusterState.routingTable()) { + builder.routingTable(RoutingTable.builder(newClusterState.routingTable()) + .version(newClusterState.routingTable().version() + 1).build()); + } + if (previousClusterState.metaData() != newClusterState.metaData()) { + builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); + } + + // remove the no master block, if it exists + if (newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock().id())) { + builder.blocks(ClusterBlocks.builder().blocks(newClusterState.blocks()) + .removeGlobalBlock(discoverySettings.getNoMasterBlock().id())); + } + + newClusterState = builder.build(); } + + assert newClusterState.nodes().getMasterNodeId() == null || + newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock().id()) == false : + "cluster state with master node must not have NO_MASTER_BLOCK"; + return newClusterState; } @@ -801,14 +835,16 @@ public class ClusterService extends AbstractLifecycleComponent { taskOutputs.processedDifferentClusterState(previousClusterState, newClusterState); - try { - taskOutputs.clusterStatePublished(clusterChangedEvent); - } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( - "exception thrown while notifying executor of new cluster state publication [{}]", - taskInputs.summary), - e); + if (newClusterState.nodes().isLocalNodeElectedMaster()) { + try { + taskOutputs.clusterStatePublished(clusterChangedEvent); + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "exception thrown while notifying executor of new cluster state publication [{}]", + taskInputs.summary), + e); + } } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 74dbf835b58..2d84f5f863d 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -24,12 +24,11 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.NotMasterException; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -59,7 +58,6 @@ public class NodeJoinController extends AbstractComponent { private final ClusterService clusterService; private final AllocationService allocationService; private final ElectMasterService electMaster; - private final DiscoverySettings discoverySettings; private final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(); // this is set while 
trying to become a master @@ -68,12 +66,11 @@ public class NodeJoinController extends AbstractComponent { public NodeJoinController(ClusterService clusterService, AllocationService allocationService, ElectMasterService electMaster, - DiscoverySettings discoverySettings, Settings settings) { + Settings settings) { super(settings); this.clusterService = clusterService; this.allocationService = allocationService; this.electMaster = electMaster; - this.discoverySettings = discoverySettings; } /** @@ -408,8 +405,9 @@ public class NodeJoinController extends AbstractComponent { class JoinTaskExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List joiningNodes) throws Exception { - final BatchResult.Builder results = BatchResult.builder(); + public ClusterTasksResult execute(ClusterState currentState, List joiningNodes) throws Exception { + final ClusterTasksResult.Builder results = ClusterTasksResult.builder(); + final DiscoveryNodes currentNodes = currentState.nodes(); boolean nodesChanged = false; ClusterState.Builder newState; @@ -471,8 +469,6 @@ public class NodeJoinController extends AbstractComponent { DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) - .removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); for (final DiscoveryNode joiningNode : joiningNodes) { final DiscoveryNode nodeWithSameId = nodesBuilder.get(joiningNode.getId()); if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) { @@ -490,7 +486,7 @@ public class NodeJoinController extends AbstractComponent { // now trim any left over dead nodes - either left there when the previous master stepped down // or removed by us above - ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build(); + ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).build(); return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index b0adf1696ee..fdadb775ad6 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -27,13 +27,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.NotMasterException; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -207,25 +206,20 @@ public class ZenDiscovery extends 
AbstractLifecycleComponent implements Discover nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); zenPing.start(this); - this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings); + this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, settings); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger); } @Override public void startInitialJoin() { // start the join thread from a cluster state update. See {@link JoinThreadControl} for details. - clusterService.submitStateUpdateTask("initial_join", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("initial_join", new LocalClusterUpdateTask() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { // do the join on a different thread, the DiscoveryService waits for 30s anyhow till it is discovered joinThreadControl.startNewThreadIfNotRunning(); - return currentState; + return unchanged(); } @Override @@ -352,7 +346,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return joinThreadControl.joinThreadActive(); } - // used for testing public ClusterState[] pendingClusterStates() { return publishClusterState.pendingStatesQueue().pendingClusterStates(); @@ -408,18 +401,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // finalize join through the cluster state update thread final DiscoveryNode finalMasterNode = masterNode; - clusterService.submitStateUpdateTask("finalize_join (" + masterNode + ")", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("finalize_join (" + masterNode + ")", new LocalClusterUpdateTask() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { if (!success) { // failed to join. Try again... joinThreadControl.markThreadAsDoneAndStartNew(currentThread); - return currentState; + return unchanged(); } if (currentState.getNodes().getMasterNode() == null) { @@ -427,7 +415,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // a valid master. logger.debug("no master node is set, despite of join request completing. retrying pings."); joinThreadControl.markThreadAsDoneAndStartNew(currentThread); - return currentState; + return unchanged(); } if (!currentState.getNodes().getMasterNode().equals(finalMasterNode)) { @@ -437,7 +425,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // Note: we do not have to start master fault detection here because it's set at {@link #processNextPendingClusterState } // when the first cluster state arrives. 
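Every conversion in this patch follows the same idiom: a task that used to override `runOnlyOnMaster()` to return `false` and hand back a `ClusterState` becomes a `LocalClusterUpdateTask` whose `execute` returns a `ClusterTasksResult` built from the `unchanged()`, `newState(...)` and `noMaster()` helpers visible in these hunks. A minimal sketch of the target shape (the task body and the `shouldRejoin` predicate are illustrative placeholders, not code from this patch):

```java
clusterService.submitStateUpdateTask("example-local-task", new LocalClusterUpdateTask(Priority.URGENT) {
    @Override
    public ClusterTasksResult execute(ClusterState currentState) throws Exception {
        if (shouldRejoin(currentState)) { // hypothetical predicate standing in for real logic
            return noMaster();            // drop the master and re-enter the discovery cycle
        }
        return unchanged();               // keep the current state; nothing is published
    }

    @Override
    public void onFailure(String source, Exception e) {
        logger.error("unexpected failure during [{}]", source, e);
    }
});
```

Returning `unchanged()` instead of `currentState` makes the "no change" case explicit, and `newState(...)` marks the only path on which a locally built state should be applied.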
joinThreadControl.markThreadAsDone(currentThread); - return currentState; + return unchanged(); } @Override @@ -496,9 +484,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } private void submitRejoin(String source) { - clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask(source, new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { return rejoin(currentState, source); } @@ -554,7 +542,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } @Override - public BatchResult execute(final ClusterState currentState, final List tasks) throws Exception { + public ClusterTasksResult execute(final ClusterState currentState, final List tasks) throws Exception { final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes()); boolean removed = false; for (final Task task : tasks) { @@ -568,12 +556,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (!removed) { // no nodes to remove, keep the current cluster state - return BatchResult.builder().successes(tasks).build(currentState); + return ClusterTasksResult.builder().successes(tasks).build(currentState); } final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder); - final BatchResult.Builder resultBuilder = BatchResult.builder().successes(tasks); + final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) { rejoin.accept("not enough master nodes"); return resultBuilder.build(currentState); @@ -645,14 +633,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership. 
return; } - clusterService.submitStateUpdateTask("zen-disco-mini-master-nodes-changed", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("zen-disco-min-master-nodes-changed", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { // check if we have enough master nodes, if not, we need to move into joining the cluster again if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) { return rejoin(currentState, "not enough master nodes on change of minimum_master_nodes from [" + prevMinimumMasterNode + "] to [" + minimumMasterNodes + "]"); } - return currentState; + return unchanged(); } @@ -685,18 +673,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover logger.info((Supplier) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); - clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { if (!masterNode.equals(currentState.nodes().getMasterNode())) { // master got switched on us, no need to send anything - return currentState; + return unchanged(); } // flush any pending cluster states from old master, so it will not be set as master again @@ -710,29 +693,20 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } - }); } void processNextPendingClusterState(String reason) { - clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new ClusterStateUpdateTask(Priority.URGENT) { - @Override - public boolean runOnlyOnMaster() { - return false; - } - + clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new LocalClusterUpdateTask(Priority.URGENT) { ClusterState newClusterState = null; @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { newClusterState = publishClusterState.pendingStatesQueue().getNextClusterStateToProcess(); // all pending states have been processed if (newClusterState == null) { - return currentState; + return unchanged(); } assert newClusterState.nodes().getMasterNode() != null : "received a cluster state without a master"; @@ -743,7 +717,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } if (shouldIgnoreOrRejectNewClusterState(logger, currentState, newClusterState)) { - return currentState; + return unchanged(); } // check to see that we monitor the correct master of the cluster @@ -754,7 +728,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) { // its a fresh update from the master as we transition from a start of not having a master to having one logger.debug("got first state from fresh master [{}]", 
newClusterState.nodes().getMasterNodeId()); - return newClusterState; + return newState(newClusterState); } @@ -784,7 +758,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover builder.metaData(metaDataBuilder); } - return builder.build(); + return newState(builder.build()); } @Override @@ -962,7 +936,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return pingResponses; } - protected ClusterState rejoin(ClusterState clusterState, String reason) { + protected ClusterStateTaskExecutor.ClusterTasksResult rejoin(ClusterState clusterState, String reason) { // *** called from within an cluster state update task *** // assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME); @@ -971,29 +945,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover nodesFD.stop(); masterFD.stop(reason); - - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(clusterState.blocks()) - .addGlobalBlock(discoverySettings.getNoMasterBlock()) - .build(); - - // clean the nodes, we are now not connected to anybody, since we try and reform the cluster - DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(clusterState.nodes()).masterNodeId(null).build(); - // TODO: do we want to force a new thread if we actively removed the master? this is to give a full pinging cycle // before a decision is made. joinThreadControl.startNewThreadIfNotRunning(); - - return ClusterState.builder(clusterState) - .blocks(clusterBlocks) - .nodes(discoveryNodes) - .build(); + return LocalClusterUpdateTask.noMaster(); } private boolean localNodeMaster() { return nodes().isLocalNodeElectedMaster(); } - private ClusterState handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) { + private ClusterStateTaskExecutor.ClusterTasksResult handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) { assert localClusterState.nodes().isLocalNodeElectedMaster() : "handleAnotherMaster called but current node is not a master"; assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread"; @@ -1016,7 +978,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); } - return localClusterState; + return LocalClusterUpdateTask.unchanged(); } } @@ -1083,12 +1045,16 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return; } logger.debug("got a ping from another master {}. resolving who should rejoin. 
current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get()); - clusterService.submitStateUpdateTask("ping from another master", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("ping from another master", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - pingsWhileMaster.set(0); - return handleAnotherMaster(currentState, pingRequest.masterNode(), pingRequest.clusterStateVersion(), "node fd ping"); + public ClusterTasksResult execute(ClusterState currentState) throws Exception { + if (currentState.nodes().isLocalNodeElectedMaster()) { + pingsWhileMaster.set(0); + return handleAnotherMaster(currentState, pingRequest.masterNode(), pingRequest.clusterStateVersion(), "node fd ping"); + } else { + return unchanged(); + } } @Override @@ -1134,15 +1100,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover class RejoinClusterRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { - clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { + public ClusterTasksResult execute(ClusterState currentState) { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception e) { @@ -1186,7 +1147,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } /** cleans any running joining thread and calls {@link #rejoin} */ - public ClusterState stopRunningThreadAndRejoin(ClusterState clusterState, String reason) { + public ClusterStateTaskExecutor.ClusterTasksResult stopRunningThreadAndRejoin(ClusterState clusterState, String reason) { ClusterService.assertClusterStateThread(); currentJoinThread.set(null); return rejoin(clusterState, reason); diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 589d8348981..8b59a3342b0 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -26,7 +26,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -283,24 +283,19 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe return; } - clusterService.submitStateUpdateTask("indices_store ([" + shardId + "] active fully on other nodes)", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("indices_store ([" + shardId + "] active fully on other nodes)", new LocalClusterUpdateTask() { @Override - public 
boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { if (clusterStateVersion != currentState.getVersion()) { logger.trace("not deleting shard {}, the update task state version[{}] is not equal to cluster state before shard active api call [{}]", shardId, currentState.getVersion(), clusterStateVersion); - return currentState; + return unchanged(); } try { indicesService.deleteShardStore("no longer used", shardId, currentState); } catch (Exception ex) { logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); } - return currentState; + return unchanged(); } @Override diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 2c29d00ecc6..49115d8b3c4 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -564,6 +564,7 @@ public class Node implements Closeable { injector.getInstance(ResourceWatcherService.class).start(); injector.getInstance(GatewayService.class).start(); Discovery discovery = injector.getInstance(Discovery.class); + clusterService.setDiscoverySettings(discovery.getDiscoverySettings()); clusterService.addInitialStateBlock(discovery.getDiscoverySettings().getNoMasterBlock()); clusterService.setClusterStatePublisher(discovery::publish); diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 7f84f2d66c8..e2b389d1e05 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -628,8 +628,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp } @Override - public BatchResult execute(final ClusterState currentState, final List tasks) throws Exception { - final BatchResult.Builder resultBuilder = BatchResult.builder().successes(tasks); + public ClusterTasksResult execute(final ClusterState currentState, final List tasks) throws Exception { + final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); Set completedSnapshots = tasks.stream().map(e -> e.snapshot).collect(Collectors.toSet()); final List entries = new ArrayList<>(); final RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index d976f9229b8..dd87019ee47 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -359,19 +359,19 @@ public class TribeService extends AbstractLifecycleComponent { this.tribeName = tribeName; } - @Override - public boolean runOnlyOnMaster() { - return false; - } - @Override public String describeTasks(List tasks) { return tasks.stream().map(ClusterChangedEvent::source).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); } @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - BatchResult.Builder builder = BatchResult.builder(); + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { 
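The executor-side half of the rename is just as mechanical: `BatchResult` and `BatchResult.Builder` become `ClusterTasksResult` and `ClusterTasksResult.Builder`, with the surrounding build calls unchanged. A condensed sketch of the resulting pattern for a batching executor (generic parameters elided, as in the hunks above):

```java
// accept every task in the batch and keep the state as-is; a real
// executor would normally build and return an updated ClusterState
ClusterStateTaskExecutor executor = (currentState, tasks) ->
    ClusterStateTaskExecutor.ClusterTasksResult.builder()
        .successes(tasks)
        .build(currentState);
```

The lambda form, which several of the test changes later in this patch use, works because `execute` is the interface's only abstract method.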
+ ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); ClusterState.Builder newState = ClusterState.builder(currentState).incrementVersion(); boolean clusterStateChanged = updateNodes(currentState, tasks, newState); clusterStateChanged |= updateIndicesAndMetaData(currentState, tasks, newState); diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 00edd536258..91420fa227a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -88,7 +88,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa public void testEmptyTaskListProducesSameClusterState() throws Exception { List tasks = Collections.emptyList(); - ClusterStateTaskExecutor.BatchResult result = + ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); assertTasksSuccessful(tasks, result, clusterState, false); } @@ -97,7 +97,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa String reason = "test duplicate failures are okay"; ClusterState currentState = createClusterStateWithStartedShards(reason); List tasks = createExistingShards(currentState, reason); - ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); + ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(currentState, tasks); assertTasksSuccessful(tasks, result, clusterState, true); } @@ -105,7 +105,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa String reason = "test non existent shards are marked as successful"; ClusterState currentState = createClusterStateWithStartedShards(reason); List tasks = createNonExistentShards(currentState, reason); - ClusterStateTaskExecutor.BatchResult result = executor.execute(clusterState, tasks); + ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); assertTasksSuccessful(tasks, result, clusterState, false); } @@ -123,7 +123,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa List tasks = new ArrayList<>(); tasks.addAll(failingTasks); tasks.addAll(nonExistentTasks); - ClusterStateTaskExecutor.BatchResult result = failingExecutor.execute(currentState, tasks); + ClusterStateTaskExecutor.ClusterTasksResult result = failingExecutor.execute(currentState, tasks); Map taskResultMap = failingTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.failure(new RuntimeException("simulated applyFailedShards failure")))); taskResultMap.putAll(nonExistentTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success()))); @@ -146,7 +146,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa task -> ClusterStateTaskExecutor.TaskResult.failure(new ShardStateAction.NoLongerPrimaryShardException(task.shardId, "primary term [" + task.primaryTerm + "] did not match current primary term [" + currentState.metaData().index(task.shardId.getIndex()).primaryTerm(task.shardId.id()) + "]")))); - ClusterStateTaskExecutor.BatchResult result = executor.execute(currentState, tasks); + ClusterStateTaskExecutor.ClusterTasksResult result = 
executor.execute(currentState, tasks); assertTaskResults(taskResultMap, result, currentState, false); } @@ -214,7 +214,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa private static void assertTasksSuccessful( List tasks, - ClusterStateTaskExecutor.BatchResult result, + ClusterStateTaskExecutor.ClusterTasksResult result, ClusterState clusterState, boolean clusterStateChanged ) { @@ -225,7 +225,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa private static void assertTaskResults( Map taskResultMap, - ClusterStateTaskExecutor.BatchResult result, + ClusterStateTaskExecutor.ClusterTasksResult result, ClusterState clusterState, boolean clusterStateChanged ) { diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index 937658736d6..2141cfa809b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; @@ -130,22 +131,17 @@ public class ClusterStateHealthTests extends ESTestCase { }); logger.info("--> submit task to restore master"); - clusterService.submitStateUpdateTask("restore master", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("restore master", new LocalClusterUpdateTask() { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - final DiscoveryNodes nodes = currentState.nodes(); - return ClusterState.builder(currentState).nodes(DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId())).build(); + public ClusterTasksResult execute(ClusterState currentState) throws Exception { + return newState(ClusterState.builder(currentState).nodes( + DiscoveryNodes.builder(currentState.nodes()).masterNodeId(currentState.nodes().getLocalNodeId())).build()); } @Override public void onFailure(String source, Exception e) { logger.warn("unexpected failure", e); } - - @Override - public boolean runOnlyOnMaster() { - return false; - } }); logger.info("--> waiting for listener to be called and cluster state being blocked"); diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index 57086a0d1bf..cc76fdf9dc7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -25,6 +25,7 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; @@ -137,6 +138,8 @@ public class ClusterServiceTests extends ESTestCase { 
}); timedClusterService.setClusterStatePublisher((event, ackListener) -> { }); + timedClusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); timedClusterService.start(); ClusterState state = timedClusterService.state(); final DiscoveryNodes nodes = state.nodes(); @@ -284,17 +287,12 @@ public class ClusterServiceTests extends ESTestCase { taskFailed[0] = true; final CountDownLatch latch2 = new CountDownLatch(1); - nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + nonMaster.submitStateUpdateTask("test", new LocalClusterUpdateTask() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { taskFailed[0] = false; latch2.countDown(); - return currentState; + return unchanged(); } @Override @@ -324,14 +322,9 @@ public class ClusterServiceTests extends ESTestCase { ClusterStateTaskConfig.build(Priority.NORMAL), new ClusterStateTaskExecutor() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState newClusterState = ClusterState.builder(currentState).build(); - return BatchResult.builder().successes(tasks).build(newClusterState); + return ClusterTasksResult.builder().successes(tasks).build(newClusterState); } @Override @@ -367,19 +360,9 @@ public class ClusterServiceTests extends ESTestCase { ClusterStateTaskConfig.build(Priority.NORMAL), new ClusterStateTaskExecutor() { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState newClusterState = ClusterState.builder(currentState).build(); - return BatchResult.builder().successes(tasks).build(newClusterState); - } - - @Override - public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { - assertNotNull(assertionRef.get()); + return ClusterTasksResult.builder().successes(tasks).build(newClusterState); } }, new ClusterStateTaskListener() { @@ -419,16 +402,11 @@ public class ClusterServiceTests extends ESTestCase { class TaskExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { executionOrder.addAll(tasks); // do this first, so startedProcessing can be used as a notification that this is done. 
startedProcessing.release(tasks.size()); allowProcessing.acquire(tasks.size()); - return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); - } - - @Override - public boolean runOnlyOnMaster() { - return false; + return ClusterTasksResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); } } @@ -478,14 +456,9 @@ public class ClusterServiceTests extends ESTestCase { List tasks = new ArrayList<>(); @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { this.tasks.addAll(tasks); - return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); - } - - @Override - public boolean runOnlyOnMaster() { - return false; + return ClusterTasksResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); } } @@ -573,7 +546,7 @@ public class ClusterServiceTests extends ESTestCase { (currentState, taskList) -> { assertThat(taskList.size(), equalTo(tasks.size())); assertThat(taskList.stream().collect(Collectors.toSet()), equalTo(tasks.keySet())); - return ClusterStateTaskExecutor.BatchResult.builder().successes(taskList).build(currentState); + return ClusterStateTaskExecutor.ClusterTasksResult.builder().successes(taskList).build(currentState); }); latch.await(); @@ -637,7 +610,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { for (Set expectedSet : taskGroups) { long count = tasks.stream().filter(expectedSet::contains).count(); assertThat("batched set should be executed together or not at all. Expected " + expectedSet + "s. 
Executing " + tasks, @@ -651,12 +624,7 @@ public class ClusterServiceTests extends ESTestCase { batches.incrementAndGet(); semaphore.acquire(); } - return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); - } - - @Override - public boolean runOnlyOnMaster() { - return false; + return ClusterTasksResult.builder().successes(tasks).build(maybeUpdatedClusterState); } @Override @@ -812,7 +780,7 @@ public class ClusterServiceTests extends ESTestCase { clusterService.submitStateUpdateTask("blocking", blockingTask); ClusterStateTaskExecutor executor = (currentState, tasks) -> - ClusterStateTaskExecutor.BatchResult.builder().successes(tasks).build(currentState); + ClusterStateTaskExecutor.ClusterTasksResult.builder().successes(tasks).build(currentState); SimpleTask task = new SimpleTask(1); ClusterStateTaskListener listener = new ClusterStateTaskListener() { @@ -1109,6 +1077,8 @@ public class ClusterServiceTests extends ESTestCase { throw new Discovery.FailedToCommitClusterStateException("just to test this"); } }); + timedClusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); timedClusterService.start(); ClusterState state = timedClusterService.state(); final DiscoveryNodes nodes = state.nodes(); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index bd9e3db88b4..c5c78189ee1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -117,8 +117,7 @@ public class NodeJoinControllerTests extends ESTestCase { setState(clusterService, ClusterState.builder(clusterService.state()).nodes( DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId()))); nodeJoinController = new NodeJoinController(clusterService, createAllocationService(Settings.EMPTY), - new ElectMasterService(Settings.EMPTY), new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); + new ElectMasterService(Settings.EMPTY), Settings.EMPTY); } @After diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java index f135bb5eaa1..70b291fee60 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java @@ -32,7 +32,6 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -67,7 +66,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { .map(node -> new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? 
"left" : "failed")) .collect(Collectors.toList()); - final ClusterStateTaskExecutor.BatchResult result + final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); assertThat(result.resultingState, equalTo(clusterState)); } @@ -106,7 +105,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { } final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(builder).build(); - final ClusterStateTaskExecutor.BatchResult result = + final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes())); verifyNoMoreInteractions(electMasterService); @@ -156,7 +155,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { } final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(builder).build(); - final ClusterStateTaskExecutor.BatchResult result = + final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes())); verifyNoMoreInteractions(electMasterService); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 174c6788bd9..59c29a5b6a0 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -395,9 +395,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable relocations when we do this, to make sure the shards are not relocated from node2 // due to rebalancing, and delete its content client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); - internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); for (int i = 0; i < numShards; i++) { indexRoutingTableBuilder.addIndexShard( @@ -406,14 +406,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .build() ); } - return ClusterState.builder(currentState) + return newState(ClusterState.builder(currentState) .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder).build()) - .build(); - 
} - - @Override - public boolean runOnlyOnMaster() { - return false; + .build()); } @Override diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 179d977ea5d..2153390d610 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -22,10 +22,10 @@ package org.elasticsearch.tribe; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.MetaData; @@ -556,7 +556,7 @@ public class TribeIT extends ESIntegTestCase { final CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("update customMetaData", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override - public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { latch.countDown(); } @@ -564,7 +564,7 @@ public class TribeIT extends ESIntegTestCase { public ClusterState execute(ClusterState currentState) throws Exception { MetaData.Builder builder = MetaData.builder(currentState.metaData()); builder = addCustoms.apply(builder); - return new ClusterState.Builder(currentState).metaData(builder).build(); + return ClusterState.builder(currentState).metaData(builder).build(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index 3e3896dfc2c..933fd83ad5c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -20,15 +20,15 @@ package org.elasticsearch.test; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; @@ -65,6 +65,8 @@ public class ClusterServiceUtils { }); clusterService.setClusterStatePublisher((event, ackListener) -> { }); + clusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); clusterService.start(); final DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()); 
nodes.masterNodeId(clusterService.localNode().getId()); @@ -84,16 +86,11 @@ public class ClusterServiceUtils { public static void setState(ClusterService clusterService, ClusterState clusterState) { CountDownLatch latch = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test setting state", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("test setting state", new LocalClusterUpdateTask() { @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { // make sure we increment versions as listener may depend on it for change - return ClusterState.builder(clusterState).version(currentState.version() + 1).build(); - } - - @Override - public boolean runOnlyOnMaster() { - return false; + return newState(ClusterState.builder(clusterState).version(currentState.version() + 1).build()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java index 956088f0fd1..8a6be290502 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.disruption; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; @@ -58,21 +58,16 @@ public class BlockClusterStateProcessing extends SingleNodeDisruption { boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1)); assert success : "startDisrupting called without waiting on stopDisrupting to complete"; final CountDownLatch started = new CountDownLatch(1); - clusterService.submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("service_disruption_block", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { started.countDown(); CountDownLatch latch = disruptionLatch.get(); if (latch != null) { latch.await(); } - return currentState; + return unchanged(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java index f69c0a3085d..61afa4f77f3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.disruption; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; @@ -102,15 +102,10 @@ public 
class SlowClusterStateProcessing extends SingleNodeDisruption { return false; } final AtomicBoolean stopped = new AtomicBoolean(false); - clusterService.submitStateUpdateTask("service_disruption_delay", new ClusterStateUpdateTask(Priority.IMMEDIATE) { + clusterService.submitStateUpdateTask("service_disruption_delay", new LocalClusterUpdateTask(Priority.IMMEDIATE) { @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterTasksResult execute(ClusterState currentState) throws Exception { long count = duration.millis() / 200; // wait while checking for a stopped for (; count > 0 && !stopped.get(); count--) { @@ -120,7 +115,7 @@ public class SlowClusterStateProcessing extends SingleNodeDisruption { Thread.sleep(duration.millis() % 200); } countDownLatch.countDown(); - return currentState; + return unchanged(); } @Override From 432ec54347516b38c5dddc7262ecb392b7e51ca3 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 23 Dec 2016 06:36:00 -0500 Subject: [PATCH 013/119] Apply logging levels in hierarchical order This commit adds a test for applying logging levels in hierarchical order, and addresses an issue with restoring the logging levels at the end of a test or suite. --- .../resources/checkstyle_suppressions.xml | 1 - .../test/junit/listeners/LoggingListener.java | 108 +++++++++++------- .../test/test/LoggingListenerTests.java | 19 ++- 3 files changed, 84 insertions(+), 44 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 91ac28441e7..969f7580fea 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1001,7 +1001,6 @@ - diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index 88be2be8cfb..4a59db64640 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.test.junit.listeners; import org.apache.logging.log4j.Logger; @@ -26,19 +27,18 @@ import org.junit.runner.Description; import org.junit.runner.Result; import org.junit.runner.notification.RunListener; -import java.util.HashMap; +import java.util.Collections; import java.util.Map; import java.util.TreeMap; /** - * A {@link RunListener} that allows to change the log level for a specific test method. - * When a test method is annotated with the {@link org.elasticsearch.test.junit.annotations.TestLogging} annotation, the level for the specified loggers - * will be internally saved before the test method execution and overridden with the specified ones. - * At the end of the test method execution the original loggers levels will be restored. + * A {@link RunListener} that allows changing the log level for a specific test method. When a test method is annotated with the + * {@link TestLogging} annotation, the level for the specified loggers will be internally saved before the test method execution and + * overridden with the specified ones. At the end of the test method execution the original loggers levels will be restored. 
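The listener's save/apply/restore choreography, which this patch tightens, is easiest to see laid out against the JUnit callbacks (a summary of the code below, not additional behavior):

```java
// testRunStarted  -> save, then apply, the package-level and class-level @TestLogging
// testStarted     -> save, then apply, the method-level @TestLogging
// testFinished    -> restore the levels captured in testStarted
// testRunFinished -> restore the class-level, then the package-level, captures
```

After the fix, every step passes empty maps instead of `null`, so the restore path no longer needs a null guard.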
* - * Note: This class is not thread-safe. Given the static nature of the logging api, it assumes that tests - * are never run concurrently in the same jvm. For the very same reason no synchronization has been implemented - * regarding the save/restore process of the original loggers levels. + * This class is not thread-safe. Given the static nature of the logging API, it assumes that tests are never run concurrently in the same + * JVM. For the very same reason no synchronization has been implemented regarding the save/restore process of the original loggers + * levels. */ public class LoggingListener extends RunListener { @@ -47,29 +47,35 @@ public class LoggingListener extends RunListener { private Map previousPackageLoggingMap; @Override - public void testRunStarted(Description description) throws Exception { + public void testRunStarted(final Description description) throws Exception { Package testClassPackage = description.getTestClass().getPackage(); previousPackageLoggingMap = processTestLogging(testClassPackage != null ? testClassPackage.getAnnotation(TestLogging.class) : null); previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class)); } @Override - public void testRunFinished(Result result) throws Exception { + public void testRunFinished(final Result result) throws Exception { previousClassLoggingMap = reset(previousClassLoggingMap); previousPackageLoggingMap = reset(previousPackageLoggingMap); } @Override - public void testStarted(Description description) throws Exception { + public void testStarted(final Description description) throws Exception { final TestLogging testLogging = description.getAnnotation(TestLogging.class); previousLoggingMap = processTestLogging(testLogging); } @Override - public void testFinished(Description description) throws Exception { + public void testFinished(final Description description) throws Exception { previousLoggingMap = reset(previousLoggingMap); } + /** + * Obtain the logger with the given name. + * + * @param loggerName the logger to obtain + * @return the logger + */ private static Logger resolveLogger(String loggerName) { if (loggerName.equalsIgnoreCase("_root")) { return ESLoggerFactory.getRootLogger(); @@ -77,49 +83,69 @@ public class LoggingListener extends RunListener { return Loggers.getLogger(loggerName); } - private Map processTestLogging(TestLogging testLogging) { - Map map = getLoggersAndLevelsFromAnnotation(testLogging); + /** + * Applies the test logging annotation and returns the existing logging levels. + * + * @param testLogging the test logging annotation to apply + * @return the existing logging levels + */ + private Map processTestLogging(final TestLogging testLogging) { + final Map map = getLoggersAndLevelsFromAnnotation(testLogging); + if (map == null) { - return null; + return Collections.emptyMap(); } - // sort the logging keys so they wouldn't override each other. 
- // for example, processing org.elasticsearch:DEBUG after org.elasticsearch.transport:TRACE - // will reset the later - TreeMap sortedLogNames = new TreeMap<>(String::compareTo); - sortedLogNames.putAll(map); - Map previousValues = new HashMap<>(); - for (Map.Entry entry : sortedLogNames.entrySet()) { - Logger logger = resolveLogger(entry.getKey()); - previousValues.put(entry.getKey(), logger.getLevel().toString()); + + // obtain the existing logging levels so that we can restore them at the end of the test; we have to do this separately from setting + // the logging levels so that setting foo does not impact the logging level for foo.bar when we check the existing logging level for + // foo.bar + final Map existing = new TreeMap<>(); + for (final Map.Entry entry : map.entrySet()) { + final Logger logger = resolveLogger(entry.getKey()); + existing.put(entry.getKey(), logger.getLevel().toString()); + } + for (final Map.Entry entry : map.entrySet()) { + final Logger logger = resolveLogger(entry.getKey()); Loggers.setLevel(logger, entry.getValue()); } - return previousValues; + return existing; } - public static Map getLoggersAndLevelsFromAnnotation(TestLogging testLogging) { + /** + * Obtain the logging levels from the test logging annotation. + * + * @param testLogging the test logging annotation + * @return a map from logger name to logging level + */ + private static Map getLoggersAndLevelsFromAnnotation(final TestLogging testLogging) { if (testLogging == null) { - return null; + return Collections.emptyMap(); } - Map map = new HashMap<>(); + // use a sorted map so that we apply a parent logger before its children thus not overwriting the child setting when processing the + // parent setting + final Map map = new TreeMap<>(); final String[] loggersAndLevels = testLogging.value().split(","); - for (String loggerAndLevel : loggersAndLevels) { - String[] loggerAndLevelArray = loggerAndLevel.split(":"); - if (loggerAndLevelArray.length >=2) { - String loggerName = loggerAndLevelArray[0]; - String level = loggerAndLevelArray[1]; - map.put(loggerName, level); + for (final String loggerAndLevel : loggersAndLevels) { + final String[] loggerAndLevelArray = loggerAndLevel.split(":"); + if (loggerAndLevelArray.length >= 2) { + map.put(loggerAndLevelArray[0], loggerAndLevelArray[1]); } } return map; } - private Map reset(Map map) { - if (map != null) { - for (Map.Entry previousLogger : map.entrySet()) { - Logger logger = resolveLogger(previousLogger.getKey()); - Loggers.setLevel(logger, previousLogger.getValue()); - } + /** + * Reset the logging levels to the state provided by the map.
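The `TreeMap` is what enforces the hierarchy: entries are applied in lexicographic order, so a parent logger is always processed before its children and can no longer overwrite a child's setting, the failure mode the removed comment warned about. With the logger names the updated test uses:

```java
// "foo" sorts before "foo.bar", so the parent is set to WARN first
// and the child's more specific ERROR level survives
@TestLogging("foo:WARN,foo.bar:ERROR")
public void annotatedTestMethod() { }
```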
+ * + * @param map the logging levels to apply + * @return an empty map + */ + private Map reset(final Map map) { + for (final Map.Entry previousLogger : map.entrySet()) { + final Logger logger = resolveLogger(previousLogger.getKey()); + Loggers.setLevel(logger, previousLogger.getValue()); } - return null; + + return Collections.emptyMap(); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java index f5f1cb77a73..b2fa68359b1 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java @@ -82,27 +82,42 @@ public class LoggingListenerTests extends ESTestCase { Logger abcLogger = Loggers.getLogger("abc"); Logger xyzLogger = Loggers.getLogger("xyz"); + // we include foo and foo.bar to maintain that logging levels are applied from the top of the hierarchy down; this ensures that + // setting the logging level for a parent logger and a child logger applies the parent level first and then the child as otherwise + // setting the parent level would overwrite the child level + Logger fooLogger = Loggers.getLogger("foo"); + Logger fooBarLogger = Loggers.getLogger("foo.bar"); final Level level = ESLoggerFactory.getRootLogger().getLevel(); assertThat(xyzLogger.getLevel(), equalTo(level)); assertThat(abcLogger.getLevel(), equalTo(level)); + assertThat(fooLogger.getLevel(), equalTo(level)); + assertThat(fooBarLogger.getLevel(), equalTo(level)); loggingListener.testRunStarted(suiteDescription); assertThat(xyzLogger.getLevel(), equalTo(level)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); + assertThat(fooLogger.getLevel(), equalTo(Level.WARN)); + assertThat(fooBarLogger.getLevel(), equalTo(Level.ERROR)); Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test"); loggingListener.testStarted(testDescription); assertThat(xyzLogger.getLevel(), equalTo(level)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); + assertThat(fooLogger.getLevel(), equalTo(Level.WARN)); + assertThat(fooBarLogger.getLevel(), equalTo(Level.ERROR)); loggingListener.testFinished(testDescription); assertThat(xyzLogger.getLevel(), equalTo(level)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); + assertThat(fooLogger.getLevel(), equalTo(Level.WARN)); + assertThat(fooBarLogger.getLevel(), equalTo(Level.ERROR)); loggingListener.testRunFinished(new Result()); assertThat(xyzLogger.getLevel(), equalTo(level)); assertThat(abcLogger.getLevel(), equalTo(level)); + assertThat(fooLogger.getLevel(), equalTo(level)); + assertThat(fooBarLogger.getLevel(), equalTo(level)); } public void testCustomLevelPerClassAndPerMethod() throws Exception { @@ -151,7 +166,7 @@ public class LoggingListenerTests extends ESTestCase { /** * dummy class used to create a junit suite description that has the @TestLogging annotation */ - @TestLogging("abc:WARN") + @TestLogging("abc:WARN,foo:WARN,foo.bar:ERROR") public static class AnnotatedTestClass { } @@ -162,7 +177,7 @@ public class LoggingListenerTests extends ESTestCase { public static class TestClass { @SuppressWarnings("unused") - @TestLogging("xyz:TRACE") + @TestLogging("xyz:TRACE,foo:WARN,foo.bar:ERROR") public void annotatedTestMethod() {} @SuppressWarnings("unused") From ddf4a463f3f006ae8c6e899d8f5c4a52bdff06b7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 23 Dec 2016 06:48:50 -0500 Subject: 
[PATCH 014/119] Reject invalid test logging annotations Today we silently ignore invalid test logging annotations. This commit rejects these annotations, failing the processing of the annotation and aborting the test. --- .../test/junit/listeners/LoggingListener.java | 4 +- .../test/test/LoggingListenerTests.java | 59 +++++++++++++++++-- 2 files changed, 58 insertions(+), 5 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index 4a59db64640..e021df52c60 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -127,8 +127,10 @@ public class LoggingListener extends RunListener { final String[] loggersAndLevels = testLogging.value().split(","); for (final String loggerAndLevel : loggersAndLevels) { final String[] loggerAndLevelArray = loggerAndLevel.split(":"); - if (loggerAndLevelArray.length >= 2) { + if (loggerAndLevelArray.length == 2) { map.put(loggerAndLevelArray[0], loggerAndLevelArray[1]); + } else { + throw new IllegalArgumentException("invalid test logging annotation [" + loggerAndLevel + "]"); } } return map; diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java index b2fa68359b1..0845fc2546f 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java @@ -163,8 +163,33 @@ public class LoggingListenerTests extends ESTestCase { assertThat(abcLogger.getLevel(), equalTo(level)); } + public void testInvalidClassTestLoggingAnnotation() throws Exception { + final LoggingListener loggingListener = new LoggingListener(); + + final Description suiteDescription = Description.createSuiteDescription(InvalidClass.class); + + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> loggingListener.testRunStarted(suiteDescription)); + assertThat(e.getMessage(), equalTo("invalid test logging annotation [abc]")); + } + + public void testInvalidMethodTestLoggingAnnotation() throws Exception { + final LoggingListener loggingListener = new LoggingListener(); + + final Description suiteDescription = Description.createSuiteDescription(InvalidMethod.class); + + loggingListener.testRunStarted(suiteDescription); + + final Method method = InvalidMethod.class.getMethod("invalidMethod"); + final TestLogging annotation = method.getAnnotation(TestLogging.class); + Description testDescription = Description.createTestDescription(InvalidMethod.class, "invalidMethod", annotation); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> loggingListener.testStarted(testDescription)); + assertThat(e.getMessage(), equalTo("invalid test logging annotation [abc:INFO:WARN]")); + } + /** - * dummy class used to create a junit suite description that has the @TestLogging annotation + * Dummy class used to create a JUnit suite description that has the {@link TestLogging} annotation. 
*/ @TestLogging("abc:WARN,foo:WARN,foo.bar:ERROR") public static class AnnotatedTestClass { @@ -172,17 +197,43 @@ public class LoggingListenerTests extends ESTestCase { } /** - * dummy class used to create a junit suite description that doesn't have the @TestLogging annotation, but its test methods have it + * Dummy class used to create a JUnit suite description that doesn't have the {@link TestLogging} annotation, but its test methods have + * it. */ public static class TestClass { @SuppressWarnings("unused") @TestLogging("xyz:TRACE,foo:WARN,foo.bar:ERROR") - public void annotatedTestMethod() {} + public void annotatedTestMethod() { + + } @SuppressWarnings("unused") @TestLogging("abc:TRACE,xyz:DEBUG") - public void annotatedTestMethod2() {} + public void annotatedTestMethod2() { + + } + + } + + /** + * Dummy class with an invalid {@link TestLogging} annotation. + */ + @TestLogging("abc") + public static class InvalidClass { + + } + + /** + * Dummy class with an invalid {@link TestLogging} annotation on a method. + */ + public static class InvalidMethod { + + @SuppressWarnings("unused") + @TestLogging("abc:INFO:WARN") + public void invalidMethod() { + + } } From c2baa5f213cb181c3e4ba34fca9c11e8ce5d4d6b Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Fri, 23 Dec 2016 13:40:54 +0100 Subject: [PATCH 015/119] TransportService should capture listener before spawning background notification task Not doing this made it difficult to establish a happens-before relationship between connecting to a node and adding a listener, causing test code like this to fail sporadically: ``` // connection to reuse handleA.transportService.connectToNode(handleB.node); // install a listener to check that no new connections are made handleA.transportService.addConnectionListener(new TransportConnectionListener() { @Override public void onConnectionOpened(DiscoveryNode node) { fail("should not open any connections. 
got [" + node + "]"); } }); ``` relates to #22277 --- .../transport/TransportService.java | 21 ++++++++++--------- .../discovery/zen/UnicastZenPingTests.java | 2 -- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index e76210ff195..9774e5e0f66 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -64,6 +64,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ScheduledFuture; import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Stream; import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.listSetting; @@ -807,20 +808,20 @@ public class TransportService extends AbstractLifecycleComponent { @Override public void onNodeConnected(final DiscoveryNode node) { - threadPool.generic().execute(() -> { - for (TransportConnectionListener connectionListener : connectionListeners) { - connectionListener.onNodeConnected(node); - } - }); + // capture listeners before spawning the background callback so the following pattern won't trigger a call + // connectToNode(); connection is completed successfully + // addConnectionListener(); this listener shouldn't be called + final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); + threadPool.generic().execute(() -> listenersToNotify.forEach(listener -> listener.onNodeConnected(node))); } @Override public void onConnectionOpened(DiscoveryNode node) { - threadPool.generic().execute(() -> { - for (TransportConnectionListener connectionListener : connectionListeners) { - connectionListener.onConnectionOpened(node); - } - }); + // capture listeners before spawning the background callback so the following pattern won't trigger a call + // connectToNode(); connection is completed successfully + // addConnectionListener(); this listener shouldn't be called + final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); + threadPool.generic().execute(() -> listenersToNotify.forEach(listener -> listener.onConnectionOpened(node))); } @Override diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index db81956865a..6e263f474dc 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -540,7 +539,6 @@ public class UnicastZenPingTests extends ESTestCase { } } - @TestLogging("org.elasticsearch:DEBUG,org.elasticsearch.discovery:TRACE,org.elasticsearch.transport:TRACE") public void testResolveReuseExistingNodeConnections() throws ExecutionException, InterruptedException { final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 
0).build(); From 1243abfeccd5424aced7e8c0612484d131160362 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 23 Dec 2016 08:17:16 -0500 Subject: [PATCH 016/119] build: do not use cached snapshots for backwards compatibility tests The backwards compatibility tests rely on Gradle's built-in mechanisms for resolving dependencies to get the zip of the older version we test against. By default, Gradle caches snapshots for 24 hours, which can lead to unexpected failures in CI. This change makes the special configurations for backwards compatibility always update their snapshots by setting the amount of time to cache them to 0 seconds. --- .../org/elasticsearch/gradle/test/ClusterFormationTasks.groovy | 3 +++ 1 file changed, 3 insertions(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 4c6771ccda7..756c05b07d5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -39,6 +39,7 @@ import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec import java.nio.file.Paths +import java.util.concurrent.TimeUnit /** * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. @@ -91,6 +92,8 @@ class ClusterFormationTasks { configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(), project.configurations.elasticsearchBwcPlugins, config.bwcVersion) } + project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) + project.configurations.elasticsearchBwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) } for (int i = 0; i < config.numNodes; i++) { // we start N nodes and out of these N nodes there might be M bwc nodes. From 2713549533bdeb944c436e1d0f2f199a2ef8b2b6 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 23 Dec 2016 09:44:56 -0500 Subject: [PATCH 017/119] Use reader for doc stats Today we try to pull stats from the index writer, but we do not get a consistent view of the stats. Under heavy indexing, the inconsistency can be severe. In particular, it can lead to the number of deleted docs being reported as negative, which in turn leads to serialization issues. Instead, we should provide a consistent view of the stats by using an index reader. 
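A minimal sketch of the reader-based computation this commit describes; the acquireSearcher and DocsStats names come from the change below, while the standalone helper method itself is illustrative:

```
// a rough sketch, assuming the usual imports for IndexShard, Engine.Searcher, and DocsStats
static DocsStats docStatsFromReader(IndexShard shard) {
    try (Engine.Searcher searcher = shard.acquireSearcher("doc_stats")) {
        // numDocs() and numDeletedDocs() are read from the same point-in-time reader,
        // so the pair is consistent and deleted docs can never be observed as negative
        return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs());
    }
}
```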
Relates #22317 --- .../elasticsearch/index/engine/Engine.java | 11 -- .../index/engine/InternalEngine.java | 9 -- .../elasticsearch/index/shard/IndexShard.java | 6 +- .../index/engine/InternalEngineTests.java | 27 ---- .../index/engine/ShadowEngineTests.java | 27 ---- .../index/shard/IndexShardTests.java | 93 +++++++++++++- .../indices/stats/IndexStatsIT.java | 116 ++++++++++++++++++ .../reference/indices/rollover-index.asciidoc | 2 + 8 files changed, 213 insertions(+), 78 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index b081d76f3c7..663a10791b6 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -64,7 +64,6 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.seqno.SequenceNumbersService; -import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -1374,16 +1373,6 @@ public abstract class Engine implements Closeable { return this.lastWriteNanos; } - /** - * Returns the engines current document statistics - */ - public DocsStats getDocStats() { - try (Engine.Searcher searcher = acquireSearcher("doc_stats")) { - IndexReader reader = searcher.reader(); - return new DocsStats(reader.numDocs(), reader.numDeletedDocs()); - } - } - /** * Called for each new opened engine searcher to warm new segments * diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 058ed0a19fc..a18ca7f280e 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -64,7 +64,6 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbersService; -import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; @@ -1620,14 +1619,6 @@ public class InternalEngine extends Engine { return seqNoService; } - @Override - public DocsStats getDocStats() { - final int numDocs = indexWriter.numDocs(); - final int maxDoc = indexWriter.maxDoc(); - return new DocsStats(numDocs, maxDoc-numDocs); - } - - /** * Returns the number of times a version was looked up either from the index. 
* Note this is only available if assertions are enabled diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index fc6eac196d8..b9eb50545da 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -669,9 +669,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } public DocsStats docStats() { - readAllowed(); - final Engine engine = getEngine(); - return engine.getDocStats(); + try (final Engine.Searcher searcher = acquireSearcher("doc_stats")) { + return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs()); + } } /** diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 065a6d74f07..a7620901826 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2548,33 +2548,6 @@ public class InternalEngineTests extends ESTestCase { } - public void testDocStats() throws IOException { - final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult indexResult = engine.index(firstIndexRequest); - assertThat(indexResult.getVersion(), equalTo(1L)); - } - DocsStats docStats = engine.getDocStats(); - assertEquals(numDocs, docStats.getCount()); - assertEquals(0, docStats.getDeleted()); - engine.forceMerge(randomBoolean(), 1, false, false, false); - - ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult index = engine.index(firstIndexRequest); - assertThat(index.getVersion(), equalTo(2L)); - engine.flush(); // flush - buffered deletes are not counted - docStats = engine.getDocStats(); - assertEquals(1, docStats.getDeleted()); - assertEquals(numDocs, docStats.getCount()); - engine.forceMerge(randomBoolean(), 1, false, false, false); - docStats = engine.getDocStats(); - assertEquals(0, docStats.getDeleted()); - assertEquals(numDocs, docStats.getCount()); - } - public void testDoubleDelivery() throws IOException { final ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); Engine.Index operation = randomAppendOnly(1, doc, false); diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index a3c58f25ea9..a7470666d63 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -984,33 +984,6 @@ public class ShadowEngineTests extends ESTestCase { } } - public void testDocStats() throws IOException { - final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult indexResult = primaryEngine.index(firstIndexRequest); - assertThat(indexResult.getVersion(), equalTo(1L)); - } - DocsStats docStats = primaryEngine.getDocStats(); - assertEquals(numDocs, docStats.getCount()); - assertEquals(0, docStats.getDeleted()); - - docStats = replicaEngine.getDocStats(); - assertEquals(0, docStats.getCount()); - assertEquals(0, docStats.getDeleted()); - primaryEngine.flush(); - - docStats = replicaEngine.getDocStats(); - assertEquals(0, docStats.getCount()); - assertEquals(0, docStats.getDeleted()); - replicaEngine.refresh("test"); - docStats = replicaEngine.getDocStats(); - assertEquals(numDocs, docStats.getCount()); - assertEquals(0, docStats.getDeleted()); - primaryEngine.forceMerge(randomBoolean(), 1, false, false, false); - } - public void testRefreshListenersFails() throws IOException { EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), new RefreshListeners(null, null, null, logger)); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 036faa9b903..135d77a34ab 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Constants; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -57,11 +58,13 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.fielddata.FieldDataStats; @@ -73,6 +76,7 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; +import 
org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -112,13 +116,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; -import static org.elasticsearch.common.lucene.Lucene.readScoreDoc; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1357,6 +1363,91 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(sourceShard, targetShard); } + public void testDocStats() throws IOException { + IndexShard indexShard = null; + try { + indexShard = newStartedShard(); + final long numDocs = randomIntBetween(2, 32); // at least two documents so we have docs to delete + final long numDocsToDelete = randomIntBetween(1, Math.toIntExact(numDocs)); + for (int i = 0; i < numDocs; i++) { + final String id = Integer.toString(i); + final ParsedDocument doc = + testParsedDocument(id, id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); + final Engine.Index index = + new Engine.Index( + new Term("_uid", id), + doc, + SequenceNumbersService.UNASSIGNED_SEQ_NO, + 0, + Versions.MATCH_ANY, + VersionType.INTERNAL, + PRIMARY, + System.nanoTime(), + -1, + false); + final Engine.IndexResult result = indexShard.index(index); + assertThat(result.getVersion(), equalTo(1L)); + } + + indexShard.refresh("test"); + { + final DocsStats docsStats = indexShard.docStats(); + assertThat(docsStats.getCount(), equalTo(numDocs)); + assertThat(docsStats.getDeleted(), equalTo(0L)); + } + + final List ids = randomSubsetOf( + Math.toIntExact(numDocsToDelete), + IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList())); + for (final Integer i : ids) { + final String id = Integer.toString(i); + final ParsedDocument doc = testParsedDocument(id, id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); + final Engine.Index index = + new Engine.Index( + new Term("_uid", id), + doc, + SequenceNumbersService.UNASSIGNED_SEQ_NO, + 0, + Versions.MATCH_ANY, + VersionType.INTERNAL, + PRIMARY, + System.nanoTime(), + -1, + false); + final Engine.IndexResult result = indexShard.index(index); + assertThat(result.getVersion(), equalTo(2L)); + } + + // flush the buffered deletes + final FlushRequest flushRequest = new FlushRequest(); + flushRequest.force(false); + flushRequest.waitIfOngoing(false); + indexShard.flush(flushRequest); + + indexShard.refresh("test"); + { + final DocsStats docStats = indexShard.docStats(); + assertThat(docStats.getCount(), equalTo(numDocs)); + assertThat(docStats.getDeleted(), equalTo(numDocsToDelete)); + } + + // merge them away + final ForceMergeRequest forceMergeRequest = new ForceMergeRequest(); + forceMergeRequest.onlyExpungeDeletes(randomBoolean()); + 
forceMergeRequest.maxNumSegments(1); + indexShard.forceMerge(forceMergeRequest); + + indexShard.refresh("test"); + { + final DocsStats docStats = indexShard.docStats(); + assertThat(docStats.getCount(), equalTo(numDocs)); + assertThat(docStats.getDeleted(), equalTo(0L)); + } + } finally { + closeShards(indexShard); + } + } + /** A dummy repository for testing which just needs restore overridden */ private abstract static class RestoreOnlyRepository extends AbstractLifecycleComponent implements Repository { private final String indexName; diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 3d9e66755eb..519df00c06c 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -21,16 +21,20 @@ package org.elasticsearch.indices.stats; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; @@ -54,14 +58,27 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; +import java.util.List; import java.util.Random; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -1068,4 +1085,103 @@ public class 
IndexStatsIT extends ESIntegTestCase { assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), equalTo(0L)); } + /** + * Test that we can safely concurrently index and get stats. This test was inspired by a serialization issue that arose due to a race + * getting doc stats during heavy indexing. The race could lead to deleted docs being negative which would then be serialized as a + * variable-length long. Since serialization of negative longs using a variable-length format was unsupported + * ({@link org.elasticsearch.common.io.stream.StreamOutput#writeVLong(long)}), the stream would become corrupted. Here, we want to test + * that we can continue to get stats while indexing. + */ + public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierException, InterruptedException, ExecutionException { + final AtomicInteger idGenerator = new AtomicInteger(); + final int numberOfIndexingThreads = Runtime.getRuntime().availableProcessors(); + final int numberOfStatsThreads = 4 * numberOfIndexingThreads; + final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfIndexingThreads + numberOfStatsThreads); + final AtomicBoolean stop = new AtomicBoolean(); + final List threads = new ArrayList<>(numberOfIndexingThreads + numberOfIndexingThreads); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean failed = new AtomicBoolean(); + final AtomicReference> shardFailures = new AtomicReference<>(new CopyOnWriteArrayList<>()); + final AtomicReference> executionFailures = new AtomicReference<>(new CopyOnWriteArrayList<>()); + + // increasing the number of shards increases the number of chances any one stats request will hit a race + final CreateIndexRequest createIndexRequest = + new CreateIndexRequest("test", Settings.builder().put("index.number_of_shards", 10).build()); + client().admin().indices().create(createIndexRequest).get(); + + // start threads that will index concurrently with stats requests + for (int i = 0; i < numberOfIndexingThreads; i++) { + final Thread thread = new Thread(() -> { + try { + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + failed.set(true); + executionFailures.get().add(e); + latch.countDown(); + } + while (!stop.get()) { + final String id = Integer.toString(idGenerator.incrementAndGet()); + final IndexResponse response = + client() + .prepareIndex("test", "type", id) + .setSource("{}") + .get(); + assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + } + }); + thread.setName("indexing-" + i); + threads.add(thread); + thread.start(); + } + + // start threads that will get stats concurrently with indexing + for (int i = 0; i < numberOfStatsThreads; i++) { + final Thread thread = new Thread(() -> { + try { + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + failed.set(true); + executionFailures.get().add(e); + latch.countDown(); + } + final IndicesStatsRequest request = new IndicesStatsRequest(); + request.all(); + request.indices(new String[0]); + while (!stop.get()) { + try { + final IndicesStatsResponse response = client().admin().indices().stats(request).get(); + if (response.getFailedShards() > 0) { + failed.set(true); + shardFailures.get().addAll(Arrays.asList(response.getShardFailures())); + latch.countDown(); + } + } catch (final ExecutionException | InterruptedException e) { + failed.set(true); + executionFailures.get().add(e); + latch.countDown(); + } + } + }); + thread.setName("stats-" + i); + threads.add(thread); + 
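+                // once started, each stats thread blocks on the barrier at the top of its run method until all
+                // indexing threads, stats threads, and the main thread have arrived, so indexing and stats begin together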
thread.start(); + } + + // release the hounds + barrier.await(); + + // wait for a failure, or for fifteen seconds to elapse + latch.await(15, TimeUnit.SECONDS); + + // stop all threads and wait for them to complete + stop.set(true); + for (final Thread thread : threads) { + thread.join(); + } + + assertThat(shardFailures.get(), emptyCollectionOf(ShardOperationFailedException.class)); + assertThat(executionFailures.get(), emptyCollectionOf(Exception.class)); + } + } diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index db78104be12..9ae8c72a93c 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -108,6 +108,8 @@ PUT logs_write/log/1 "message": "a dummy log" } +POST logs_write/_refresh + # Wait for a day to pass POST /logs_write/_rollover <2> From 6deb5283db6e6911bfd12fa7b0888b980d760d3a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 23 Dec 2016 11:49:27 -0500 Subject: [PATCH 018/119] Fix delete op serialization format constant The delete op serialization format constant for 5.x was off by one. This commit fixes this and cleans up the handling of these formats. --- .../org/elasticsearch/index/translog/Translog.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 4ca4f57e341..bdbce03bda1 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -55,7 +55,6 @@ import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.List; -import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -826,10 +825,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public static class Index implements Operation { - public static final int FORMAT_2x = 6; // since 2.0-beta1 and 1.1 - public static final int FORMAT_AUTO_GENERATED_IDS = 7; // since 5.0.0-beta1 + public static final int FORMAT_2_X = 6; // since 2.0-beta1 and 1.1 + public static final int FORMAT_AUTO_GENERATED_IDS = FORMAT_2_X + 1; // since 5.0.0-beta1 public static final int FORMAT_SEQ_NO = FORMAT_AUTO_GENERATED_IDS + 1; // since 6.0.0 public static final int SERIALIZATION_FORMAT = FORMAT_SEQ_NO; + private final String id; private final long autoGeneratedIdTimestamp; private final String type; @@ -843,7 +843,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public Index(StreamInput in) throws IOException { final int format = in.readVInt(); // SERIALIZATION_FORMAT - assert format >= FORMAT_2x : "format was: " + format; + assert format >= FORMAT_2_X : "format was: " + format; id = in.readString(); type = in.readString(); source = in.readBytesReference(); @@ -1018,7 +1018,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public static class Delete implements Operation { - private static final int FORMAT_5_X = 3; + private static final int FORMAT_5_X = 2; private static final int FORMAT_SEQ_NO = FORMAT_5_X + 1; public static final int SERIALIZATION_FORMAT = FORMAT_SEQ_NO; @@ -1030,7 +1030,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public Delete(StreamInput in) throws IOException { 
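// with the corrected constant, FORMAT_5_X (2) matches the vint that 5.x nodes actually wrote for
// delete ops, while FORMAT_SEQ_NO (3) is what 6.0 writes; the assert below rejects anything older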
final int format = in.readVInt();// SERIALIZATION_FORMAT - assert format >= SERIALIZATION_FORMAT - 1: "format was: " + format; + assert format >= FORMAT_5_X : "format was: " + format; uid = new Term(in.readString(), in.readString()); this.version = in.readLong(); this.versionType = VersionType.fromValue(in.readByte()); From d5c18bf5c9638d1d89e50bf850986db3e7b79c5c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 23 Dec 2016 15:20:42 -0500 Subject: [PATCH 019/119] Fix doc stats test when deleting all docs This commit fixes an issue with IndexShardTests#testDocStats when the number of deleted docs is equal to the number of docs. In this case, Lucene will remove the underlying segment, tripping an assertion on the number of deleted docs. --- .../java/org/elasticsearch/index/shard/IndexShardTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 135d77a34ab..023dd49f2f0 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1401,7 +1401,8 @@ public class IndexShardTests extends IndexShardTestCase { IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList())); for (final Integer i : ids) { final String id = Integer.toString(i); - final ParsedDocument doc = testParsedDocument(id, id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); + final ParsedDocument doc = + testParsedDocument(id, id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); final Engine.Index index = new Engine.Index( new Term("_uid", id), @@ -1428,7 +1429,8 @@ public class IndexShardTests extends IndexShardTestCase { { final DocsStats docStats = indexShard.docStats(); assertThat(docStats.getCount(), equalTo(numDocs)); - assertThat(docStats.getDeleted(), equalTo(numDocsToDelete)); + // Lucene will delete a segment if all docs are deleted from it; this means that we lose the deletes when deleting all docs + assertThat(docStats.getDeleted(), equalTo(numDocsToDelete == numDocs ? 0 : numDocsToDelete)); } // merge them away From 5185a9734df920f600615500faf153847bcc7d39 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 23 Dec 2016 18:14:36 -0500 Subject: [PATCH 020/119] Add comment clarifying property setting in client This commit adds a comment clarifying why we do not catch a security exception in the pre-built transport client. --- .../client/PreBuiltTransportClient.java | 42 +++++++++++-------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index a173d43a96c..3233470a253 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -37,13 +37,12 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; /** - * A builder to create an instance of {@link TransportClient} - * This class pre-installs the + * A builder to create an instance of {@link TransportClient}. This class pre-installs the * {@link Netty4Plugin}, * {@link ReindexPlugin}, * {@link PercolatorPlugin}, * and {@link MustachePlugin} - * for the client. 
These plugins are all elasticsearch core modules required. + * plugins for the client. These plugins are all the required modules for Elasticsearch. */ @SuppressWarnings({"unchecked","varargs"}) public class PreBuiltTransportClient extends TransportClient { @@ -63,6 +62,8 @@ public class PreBuiltTransportClient extends TransportClient { final String noUnsafe = System.getProperty(noUnsafeKey); if (noUnsafe == null) { // disable Netty from using unsafe + // while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or + // the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here System.setProperty(noUnsafeKey, Boolean.toString(true)); } @@ -70,34 +71,36 @@ public class PreBuiltTransportClient extends TransportClient { final String noKeySetOptimization = System.getProperty(noKeySetOptimizationKey); if (noKeySetOptimization == null) { // disable Netty from replacing the selector key set + // while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or + // the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here System.setProperty(noKeySetOptimizationKey, Boolean.toString(true)); } } private static final Collection> PRE_INSTALLED_PLUGINS = - Collections.unmodifiableList( - Arrays.asList( - Netty4Plugin.class, - ReindexPlugin.class, - PercolatorPlugin.class, - MustachePlugin.class)); - + Collections.unmodifiableList( + Arrays.asList( + Netty4Plugin.class, + ReindexPlugin.class, + PercolatorPlugin.class, + MustachePlugin.class)); /** * Creates a new transport client with pre-installed plugins. + * * @param settings the settings passed to this transport client - * @param plugins an optional array of additional plugins to run with this client + * @param plugins an optional array of additional plugins to run with this client */ @SafeVarargs public PreBuiltTransportClient(Settings settings, Class... plugins) { this(settings, Arrays.asList(plugins)); } - /** * Creates a new transport client with pre-installed plugins. + * * @param settings the settings passed to this transport client - * @param plugins a collection of additional plugins to run with this client + * @param plugins a collection of additional plugins to run with this client */ public PreBuiltTransportClient(Settings settings, Collection> plugins) { this(settings, plugins, null); @@ -105,12 +108,15 @@ public class PreBuiltTransportClient extends TransportClient { /** * Creates a new transport client with pre-installed plugins. - * @param settings the settings passed to this transport client - * @param plugins a collection of additional plugins to run with this client - * @param hostFailureListener a failure listener that is invoked if a node is disconnected. 
This can be null + * + * @param settings the settings passed to this transport client + * @param plugins a collection of additional plugins to run with this client + * @param hostFailureListener a failure listener that is invoked if a node is disconnected; this can be null */ - public PreBuiltTransportClient(Settings settings, Collection> plugins, - HostFailureListener hostFailureListener) { + public PreBuiltTransportClient( + Settings settings, + Collection> plugins, + HostFailureListener hostFailureListener) { super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS), hostFailureListener); } From 8261bd358a5114089d88a9af238436d17d674f50 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Sun, 25 Dec 2016 19:00:20 -0500 Subject: [PATCH 021/119] Synchronize snapshot deletions on the cluster state (#22313) Before, snapshot/restore would synchronize all operations on the cluster state except for deleting snapshots. This meant that only one snapshot/restore operation would be allowed in the cluster at any given time, except for deletions: there could be two or more snapshot deletions running at the same time, or a deletion could be running, unbeknownst to the rest of the cluster, and thus a snapshot or restore would be allowed while the snapshot deletion was still in progress. This could cause any number of synchronization issues, including the situation where a snapshot that was deleted could reappear in the index-N file, even though its data was no longer present in the repository. This commit introduces a new custom type to the cluster state to represent deletions in progress. Now, another deletion cannot start if a deletion is currently in progress. Similarly, a snapshot or restore cannot be started if a deletion is currently in progress. In each case, if attempting to run another snapshot/restore operation while a deletion is in progress, a ConcurrentSnapshotExecutionException will be thrown. This is the same exception thrown if trying to snapshot while another snapshot is in progress, or restore while a snapshot is in progress. 
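As an illustrative sketch (not the exact code in SnapshotsService), a cluster state consumer can now guard against concurrent deletions roughly like this, assuming currentState, repositoryName, and snapshotName are in scope:

```
// minimal sketch; SnapshotDeletionsInProgress and hasDeletionsInProgress() are added by this
// commit, while the exception message here is illustrative
SnapshotDeletionsInProgress deletions = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletions != null && deletions.hasDeletionsInProgress()) {
    throw new ConcurrentSnapshotExecutionException(
        repositoryName, snapshotName, "cannot start operation while a snapshot deletion is in progress");
}
```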
Closes #19957 --- .../delete/TransportDeleteSnapshotAction.java | 2 +- .../elasticsearch/cluster/ClusterState.java | 30 ++- .../elasticsearch/cluster/DiffableUtils.java | 31 ++- .../cluster/SnapshotDeletionsInProgress.java | 217 ++++++++++++++++++ .../cluster/SnapshotsInProgress.java | 30 ++- .../repositories/Repository.java | 7 +- .../repositories/RepositoryData.java | 52 +++-- .../blobstore/BlobStoreRepository.java | 34 ++- .../snapshots/RestoreService.java | 8 + .../snapshots/SnapshotsService.java | 174 +++++++++++--- .../cluster/ClusterStateDiffIT.java | 1 + .../MetaDataDeleteIndexServiceTests.java | 2 +- .../index/shard/IndexShardTests.java | 7 +- .../repositories/RepositoryDataTests.java | 32 +-- .../blobstore/BlobStoreRepositoryTests.java | 68 ++++-- .../MinThreadsSnapshotRestoreIT.java | 211 +++++++++++++++++ .../SharedClusterSnapshotRestoreIT.java | 1 + .../test/snapshot.create/10_basic.yaml | 7 + 18 files changed, 798 insertions(+), 116 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java create mode 100644 core/src/test/java/org/elasticsearch/snapshots/MinThreadsSnapshotRestoreIT.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 3c37d1870e5..5cce5482ec5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -75,6 +75,6 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction { // register non plugin custom parts registerPrototype(SnapshotsInProgress.TYPE, SnapshotsInProgress.PROTO); registerPrototype(RestoreInProgress.TYPE, RestoreInProgress.PROTO); + registerPrototype(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.PROTO); } public static T lookupPrototype(String type) { @@ -715,8 +715,18 @@ public class ClusterState implements ToXContent, Diffable { routingTable.writeTo(out); nodes.writeTo(out); blocks.writeTo(out); - out.writeVInt(customs.size()); + boolean omitSnapshotDeletions = false; + if (out.getVersion().before(SnapshotDeletionsInProgress.VERSION_INTRODUCED) + && customs.containsKey(SnapshotDeletionsInProgress.TYPE)) { + // before the stated version, there were no SnapshotDeletionsInProgress, so + // don't transfer over the wire protocol + omitSnapshotDeletions = true; + } + out.writeVInt(omitSnapshotDeletions ? 
customs.size() - 1 : customs.size()); for (ObjectObjectCursor cursor : customs) { + if (omitSnapshotDeletions && cursor.key.equals(SnapshotDeletionsInProgress.TYPE)) { + continue; + } out.writeString(cursor.key); cursor.value.writeTo(out); } @@ -787,7 +797,21 @@ public class ClusterState implements ToXContent, Diffable { nodes.writeTo(out); metaData.writeTo(out); blocks.writeTo(out); - customs.writeTo(out); + Diff> customsDiff = customs; + if (out.getVersion().before(SnapshotDeletionsInProgress.VERSION_INTRODUCED)) { + customsDiff = removeSnapshotDeletionsCustomDiff(customsDiff); + } + customsDiff.writeTo(out); + } + + private Diff> removeSnapshotDeletionsCustomDiff(Diff> customs) { + if (customs instanceof DiffableUtils.ImmutableOpenMapDiff) { + @SuppressWarnings("unchecked") + DiffableUtils.ImmutableOpenMapDiff customsDiff = ((DiffableUtils.ImmutableOpenMapDiff) customs) + .withKeyRemoved(SnapshotDeletionsInProgress.TYPE); + return customsDiff; + } + return customs; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index 1a3557890dd..234f22010fa 100644 --- a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -214,12 +214,17 @@ public final class DiffableUtils { * * @param the object type */ - private static class ImmutableOpenMapDiff extends MapDiff> { + public static class ImmutableOpenMapDiff extends MapDiff> { protected ImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { super(in, keySerializer, valueSerializer); } + private ImmutableOpenMapDiff(KeySerializer keySerializer, ValueSerializer valueSerializer, + List deletes, Map> diffs, Map upserts) { + super(keySerializer, valueSerializer, deletes, diffs, upserts); + } + public ImmutableOpenMapDiff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { super(keySerializer, valueSerializer); @@ -245,6 +250,21 @@ public final class DiffableUtils { } } + /** + * Returns a new diff map with the given key removed, does not modify the invoking instance. + * If the key does not exist in the diff map, the same instance is returned. 
+ */ + public ImmutableOpenMapDiff withKeyRemoved(K key) { + if (this.diffs.containsKey(key) == false && this.upserts.containsKey(key) == false) { + return this; + } + Map> newDiffs = new HashMap<>(this.diffs); + newDiffs.remove(key); + Map newUpserts = new HashMap<>(this.upserts); + newUpserts.remove(key); + return new ImmutableOpenMapDiff<>(this.keySerializer, this.valueSerializer, this.deletes, newDiffs, newUpserts); + } + @Override public ImmutableOpenMap apply(ImmutableOpenMap map) { ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); @@ -346,6 +366,15 @@ public final class DiffableUtils { upserts = new HashMap<>(); } + protected MapDiff(KeySerializer keySerializer, ValueSerializer valueSerializer, + List deletes, Map> diffs, Map upserts) { + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; + this.deletes = deletes; + this.diffs = diffs; + this.upserts = upserts; + } + protected MapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java new file mode 100644 index 00000000000..f6257fd7a92 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState.Custom; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.snapshots.Snapshot; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * A class that represents the snapshot deletions that are in progress in the cluster. 
+ */ +public class SnapshotDeletionsInProgress extends AbstractDiffable implements Custom { + + public static final String TYPE = "snapshot_deletions"; + public static final SnapshotDeletionsInProgress PROTO = new SnapshotDeletionsInProgress(Collections.emptyList()); + // the version where SnapshotDeletionsInProgress was introduced + public static final Version VERSION_INTRODUCED = Version.V_6_0_0_alpha1_UNRELEASED; + + // the list of snapshot deletion request entries + private final List entries; + + private SnapshotDeletionsInProgress(List entries) { + this.entries = Collections.unmodifiableList(entries); + } + + public SnapshotDeletionsInProgress(StreamInput in) throws IOException { + this.entries = Collections.unmodifiableList(in.readList(Entry::new)); + } + + /** + * Returns a new instance of {@link SnapshotDeletionsInProgress} with the given + * {@link Entry} added. + */ + public static SnapshotDeletionsInProgress newInstance(Entry entry) { + return new SnapshotDeletionsInProgress(Collections.singletonList(entry)); + } + + /** + * Returns a new instance of {@link SnapshotDeletionsInProgress} which adds + * the given {@link Entry} to the invoking instance. + */ + public SnapshotDeletionsInProgress withAddedEntry(Entry entry) { + List entries = new ArrayList<>(getEntries()); + entries.add(entry); + return new SnapshotDeletionsInProgress(entries); + } + + /** + * Returns a new instance of {@link SnapshotDeletionsInProgress} which removes + * the given entry from the invoking instance. + */ + public SnapshotDeletionsInProgress withRemovedEntry(Entry entry) { + List entries = new ArrayList<>(getEntries()); + entries.remove(entry); + return new SnapshotDeletionsInProgress(entries); + } + + /** + * Returns an unmodifiable list of snapshot deletion entries. + */ + public List getEntries() { + return entries; + } + + /** + * Returns {@code true} if there are snapshot deletions in progress in the cluster, + * returns {@code false} otherwise. + */ + public boolean hasDeletionsInProgress() { + return entries.isEmpty() == false; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SnapshotDeletionsInProgress that = (SnapshotDeletionsInProgress) o; + return entries.equals(that.entries); + } + + @Override + public int hashCode() { + return 31 + entries.hashCode(); + } + + @Override + public Custom readFrom(StreamInput in) throws IOException { + return new SnapshotDeletionsInProgress(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(entries); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(TYPE); + for (Entry entry : entries) { + builder.startObject(); + { + builder.field("repository", entry.snapshot.getRepository()); + builder.field("snapshot", entry.snapshot.getSnapshotId().getName()); + builder.timeValueField("start_time_millis", "start_time", entry.startTime); + builder.field("repository_state_id", entry.repositoryStateId); + } + builder.endObject(); + } + builder.endArray(); + return builder; + } + + /** + * A class representing a snapshot deletion request entry in the cluster state. 
+ */ + public static final class Entry implements Writeable { + private final Snapshot snapshot; + private final long startTime; + private final long repositoryStateId; + + public Entry(Snapshot snapshot, long startTime, long repositoryStateId) { + this.snapshot = snapshot; + this.startTime = startTime; + this.repositoryStateId = repositoryStateId; + } + + public Entry(StreamInput in) throws IOException { + this.snapshot = new Snapshot(in); + this.startTime = in.readVLong(); + this.repositoryStateId = in.readLong(); + } + + /** + * The snapshot to delete. + */ + public Snapshot getSnapshot() { + return snapshot; + } + + /** + * The start time in milliseconds for deleting the snapshots. + */ + public long getStartTime() { + return startTime; + } + + /** + * The repository state id at the time the snapshot deletion began. + */ + public long getRepositoryStateId() { + return repositoryStateId; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Entry that = (Entry) o; + return snapshot.equals(that.snapshot) + && startTime == that.startTime + && repositoryStateId == that.repositoryStateId; + } + + @Override + public int hashCode() { + return Objects.hash(snapshot, startTime, repositoryStateId); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + snapshot.writeTo(out); + out.writeVLong(startTime); + out.writeLong(repositoryStateId); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 6df5f85987d..26ddbec7a2a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; @@ -48,6 +49,12 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus public static final SnapshotsInProgress PROTO = new SnapshotsInProgress(); + // denotes an undefined repository state id, which will happen when receiving a cluster state with + // a snapshot in progress from a pre 5.2.x node + public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L; + // the version where repository state ids were introduced + private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_6_0_0_alpha1_UNRELEASED; + @Override public boolean equals(Object o) { if (this == o) return true; @@ -74,9 +81,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private final List indices; private final ImmutableOpenMap> waitingIndices; private final long startTime; + private final long repositoryStateId; public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, - long startTime, ImmutableOpenMap shards) { + long startTime, long repositoryStateId, ImmutableOpenMap shards) { this.state = state; this.snapshot = snapshot; this.includeGlobalState = includeGlobalState; @@ -90,10 +98,12 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus this.shards = shards; this.waitingIndices 
= findWaitingIndices(shards); } + this.repositoryStateId = repositoryStateId; } public Entry(Entry entry, State state, ImmutableOpenMap shards) { - this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards); + this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, + entry.repositoryStateId, shards); } public Entry(Entry entry, ImmutableOpenMap shards) { @@ -132,6 +142,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return startTime; } + public long getRepositoryStateId() { + return repositoryStateId; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -147,6 +161,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus if (!snapshot.equals(entry.snapshot)) return false; if (state != entry.state) return false; if (!waitingIndices.equals(entry.waitingIndices)) return false; + if (repositoryStateId != entry.repositoryStateId) return false; return true; } @@ -161,6 +176,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus result = 31 * result + indices.hashCode(); result = 31 * result + waitingIndices.hashCode(); result = 31 * result + Long.hashCode(startTime); + result = 31 * result + Long.hashCode(repositoryStateId); return result; } @@ -387,12 +403,17 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus State shardState = State.fromValue(in.readByte()); builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } + long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID; + if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { + repositoryStateId = in.readLong(); + } entries[i] = new Entry(snapshot, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, + repositoryStateId, builder.build()); } return new SnapshotsInProgress(entries); @@ -417,6 +438,9 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus out.writeOptionalString(shardEntry.value.nodeId()); out.writeByte(shardEntry.value.state().value()); } + if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) { + out.writeLong(entry.repositoryStateId); + } } } @@ -430,6 +454,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private static final String INDICES = "indices"; private static final String START_TIME_MILLIS = "start_time_millis"; private static final String START_TIME = "start_time"; + private static final String REPOSITORY_STATE_ID = "repository_state_id"; private static final String SHARDS = "shards"; private static final String INDEX = "index"; private static final String SHARD = "shard"; @@ -461,6 +486,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } builder.endArray(); builder.timeValueField(START_TIME_MILLIS, START_TIME, entry.startTime()); + builder.field(REPOSITORY_STATE_ID, entry.getRepositoryStateId()); builder.startArray(SHARDS); { for (ObjectObjectCursor shardEntry : entry.shards) { diff --git a/core/src/main/java/org/elasticsearch/repositories/Repository.java b/core/src/main/java/org/elasticsearch/repositories/Repository.java index b1f534bf684..462b7ea1dab 100644 --- a/core/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/core/src/main/java/org/elasticsearch/repositories/Repository.java @@ -115,16 +115,19 @@ public interface Repository extends LifecycleComponent { * @param failure global failure reason or null * @param totalShards 
total number of shards * @param shardFailures list of shard failures + * @param repositoryStateId the unique id identifying the state of the repository when the snapshot began * @return snapshot description */ - SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures); + SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, + List shardFailures, long repositoryStateId); /** * Deletes snapshot * * @param snapshotId snapshot id + * @param repositoryStateId the unique id identifying the state of the repository when the snapshot deletion began */ - void deleteSnapshot(SnapshotId snapshotId); + void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId); /** * Returns snapshot throttle time in nanoseconds diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 4927e2b41b7..eb0bbb2f868 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -44,8 +44,19 @@ import java.util.stream.Collectors; */ public final class RepositoryData implements ToXContent { - public static final RepositoryData EMPTY = new RepositoryData(Collections.emptyList(), Collections.emptyMap()); + /** + * The generation value indicating the repository has no index generational files. + */ + public static final long EMPTY_REPO_GEN = -1L; + /** + * An instance initialized for an empty repository. + */ + public static final RepositoryData EMPTY = new RepositoryData(EMPTY_REPO_GEN, Collections.emptyList(), Collections.emptyMap()); + /** + * The generational id of the index file from which the repository data was read. + */ + private final long genId; /** * The ids of the snapshots in the repository. */ @@ -59,7 +70,8 @@ public final class RepositoryData implements ToXContent { */ private final Map> indexSnapshots; - public RepositoryData(List snapshotIds, Map> indexSnapshots) { + private RepositoryData(long genId, List snapshotIds, Map> indexSnapshots) { + this.genId = genId; this.snapshotIds = Collections.unmodifiableList(snapshotIds); this.indices = Collections.unmodifiableMap(indexSnapshots.keySet() .stream() @@ -67,8 +79,22 @@ public final class RepositoryData implements ToXContent { this.indexSnapshots = Collections.unmodifiableMap(indexSnapshots); } + /** + * Creates an instance of {@link RepositoryData} on a fresh repository (one that has no index-N files). + */ + public static RepositoryData initRepositoryData(List snapshotIds, Map> indexSnapshots) { + return new RepositoryData(EMPTY_REPO_GEN, snapshotIds, indexSnapshots); + } + protected RepositoryData copy() { - return new RepositoryData(snapshotIds, indexSnapshots); + return new RepositoryData(genId, snapshotIds, indexSnapshots); + } + + /** + * Gets the generational index file id from which this instance was read. 
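+     * (Comment added for clarity.) Callers thread this value through to
+     * {@code writeIndexGen} so that a write can detect when the repository
+     * moved to a newer index-N generation after this instance was read.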
+ */ + public long getGenId() { + return genId; } /** @@ -91,7 +117,10 @@ public final class RepositoryData implements ToXContent { */ public RepositoryData addSnapshot(final SnapshotId snapshotId, final List snapshottedIndices) { if (snapshotIds.contains(snapshotId)) { - throw new IllegalArgumentException("[" + snapshotId + "] already exists in the repository data"); + // if the snapshot id already exists in the repository data, it means an old master + // that is blocked from the cluster is trying to finalize a snapshot concurrently with + // the new master, so we make the operation idempotent + return this; } List snapshots = new ArrayList<>(snapshotIds); snapshots.add(snapshotId); @@ -110,14 +139,7 @@ public final class RepositoryData implements ToXContent { allIndexSnapshots.put(indexId, ids); } } - return new RepositoryData(snapshots, allIndexSnapshots); - } - - /** - * Initializes the indices in the repository metadata; returns a new instance. - */ - public RepositoryData initIndices(final Map> indexSnapshots) { - return new RepositoryData(snapshotIds, indexSnapshots); + return new RepositoryData(genId, snapshots, allIndexSnapshots); } /** @@ -146,7 +168,7 @@ public final class RepositoryData implements ToXContent { indexSnapshots.put(indexId, set); } - return new RepositoryData(newSnapshotIds, indexSnapshots); + return new RepositoryData(genId, newSnapshotIds, indexSnapshots); } /** @@ -256,7 +278,7 @@ public final class RepositoryData implements ToXContent { return builder; } - public static RepositoryData fromXContent(final XContentParser parser) throws IOException { + public static RepositoryData fromXContent(final XContentParser parser, final long genId) throws IOException { List snapshots = new ArrayList<>(); Map> indexSnapshots = new HashMap<>(); if (parser.nextToken() == XContentParser.Token.START_OBJECT) { @@ -305,7 +327,7 @@ public final class RepositoryData implements ToXContent { } else { throw new ElasticsearchParseException("start object expected"); } - return new RepositoryData(snapshots, indexSnapshots); + return new RepositoryData(genId, snapshots, indexSnapshots); } } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c7abded5e0e..72855bc7f30 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -39,6 +39,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -324,7 +325,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } @Override - public void deleteSnapshot(SnapshotId snapshotId) { + public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { if (isReadOnly()) { throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository"); } @@ -352,7 +353,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots final RepositoryData 
updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); - writeIndexGen(updatedRepositoryData); + writeIndexGen(updatedRepositoryData, repositoryStateId); // delete the snapshot file safeSnapshotBlobDelete(snapshot, snapshotId.getUUID()); @@ -454,7 +455,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final long startTime, final String failure, final int totalShards, - final List shardFailures) { + final List shardFailures, + final long repositoryStateId) { try { SnapshotInfo blobStoreSnapshot = new SnapshotInfo(snapshotId, indices.stream().map(IndexId::getName).collect(Collectors.toList()), @@ -467,7 +469,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final RepositoryData repositoryData = getRepositoryData(); List snapshotIds = repositoryData.getSnapshotIds(); if (!snapshotIds.contains(snapshotId)) { - writeIndexGen(repositoryData.addSnapshot(snapshotId, indices)); + writeIndexGen(repositoryData.addSnapshot(snapshotId, indices), repositoryStateId); } return blobStoreSnapshot; } catch (IOException ex) { @@ -628,7 +630,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Streams.copy(blob, out); // EMPTY is safe here because RepositoryData#fromXContent calls namedObject try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) { - repositoryData = RepositoryData.fromXContent(parser); + repositoryData = RepositoryData.fromXContent(parser, indexGen); } } return repositoryData; @@ -654,8 +656,17 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp return snapshotsBlobContainer; } - protected void writeIndexGen(final RepositoryData repositoryData) throws IOException { + protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException { assert isReadOnly() == false; // can not write to a read only repository + final long currentGen = latestIndexBlobId(); + if (repositoryStateId != SnapshotsInProgress.UNDEFINED_REPOSITORY_STATE_ID && currentGen != repositoryStateId) { + // the index file was updated by a concurrent operation, so we were operating on stale + // repository data + throw new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected current generation [" + + repositoryStateId + "], actual current generation [" + currentGen + + "] - possibly due to simultaneous snapshot deletion requests"); + } + final long newGen = currentGen + 1; final BytesReference snapshotsBytes; try (BytesStreamOutput bStream = new BytesStreamOutput()) { try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) { @@ -665,12 +676,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } snapshotsBytes = bStream.bytes(); } - final long gen = latestIndexBlobId() + 1; // write the index file - writeAtomic(INDEX_FILE_PREFIX + Long.toString(gen), snapshotsBytes); + writeAtomic(INDEX_FILE_PREFIX + Long.toString(newGen), snapshotsBytes); // delete the N-2 index file if it exists, keep the previous one around as a backup - if (isReadOnly() == false && gen - 2 >= 0) { - final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(gen - 2); + if (isReadOnly() == false && newGen - 2 >= 0) { + final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2); if (snapshotsBlobContainer.blobExists(oldSnapshotIndexFile)) { snapshotsBlobContainer.deleteBlob(oldSnapshotIndexFile); } @@ -683,7 
+693,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // write the current generation to the index-latest file final BytesReference genBytes; try (BytesStreamOutput bStream = new BytesStreamOutput()) { - bStream.writeLong(gen); + bStream.writeLong(newGen); genBytes = bStream.bytes(); } if (snapshotsBlobContainer.blobExists(INDEX_LATEST_BLOB)) { @@ -719,7 +729,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // index-latest blob // in a read-only repository, we can't know which of the two scenarios it is, // but we will assume (1) because we can't do anything about (2) anyway - return -1; + return RepositoryData.EMPTY_REPO_GEN; } } } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index e2b389d1e05..ac01bc6fc5d 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.RestoreInProgress.ShardRestoreStatus; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -205,6 +206,13 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp if (restoreInProgress != null && !restoreInProgress.entries().isEmpty()) { throw new ConcurrentSnapshotExecutionException(snapshot, "Restore process is already running in this cluster"); } + // Check if the snapshot to restore is currently being deleted + SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + throw new ConcurrentSnapshotExecutionException(snapshot, + "cannot restore a snapshot while a snapshot deletion is in-progress [" + + deletionsInProgress.getEntries().get(0).getSnapshot() + "]"); + } // Updating cluster state ClusterState.Builder builder = ClusterState.builder(currentState); diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 056b2e7b10d..1b5bfde167c 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.elasticsearch.cluster.SnapshotsInProgress.State; @@ -48,6 +49,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; 
import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -232,7 +234,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus @Override public ClusterState execute(ClusterState currentState) { validate(request, currentState); - + SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, + "cannot snapshot while a snapshot deletion is in-progress"); + } SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot here to be processed in clusterStateProcessed @@ -245,10 +251,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus State.INIT, snapshotIndices, System.currentTimeMillis(), + repositoryData.getGenId(), null); snapshots = new SnapshotsInProgress(newSnapshot); } else { - // TODO: What should we do if a snapshot is already running? throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, "a snapshot is already running"); } return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); @@ -468,7 +474,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus snapshot.startTime(), ExceptionsHelper.detailedMessage(exception), 0, - Collections.emptyList()); + Collections.emptyList(), + snapshot.getRepositoryStateId()); } catch (Exception inner) { inner.addSuppressed(exception); logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); @@ -601,12 +608,35 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (event.routingTableChanged()) { processStartedShards(event); } + finalizeSnapshotDeletionFromPreviousMaster(event); } } catch (Exception e) { logger.warn("Failed to update snapshot state ", e); } } + /** + * Finalizes a snapshot deletion in progress if the current node is the master but it + * was not master in the previous cluster state and there is still a lingering snapshot + * deletion in progress in the cluster state. This means that the old master failed + * before it could clean up an in-progress snapshot deletion. We attempt to delete the + * snapshot files and remove the deletion from the cluster state. It is possible that the + * old master was in a state of long GC and then it resumes and tries to delete the snapshot + * that has already been deleted by the current master. This is acceptable however, since + * the old master's snapshot deletion will just respond with an error but in actuality, the + * snapshot was deleted and a call to GET snapshots would reveal that the snapshot no longer exists. 
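+     * (Note added for clarity.) This mirrors the split-master reasoning behind making
+     * RepositoryData#addSnapshot idempotent earlier in this change: a demoted master and
+     * the newly elected master may race on the same repository operation, and the code
+     * must tolerate finding that work already completed.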
+ */ + private void finalizeSnapshotDeletionFromPreviousMaster(ClusterChangedEvent event) { + if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) { + SnapshotDeletionsInProgress deletionsInProgress = event.state().custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + assert deletionsInProgress.getEntries().size() == 1 : "only one in-progress deletion allowed per cluster"; + SnapshotDeletionsInProgress.Entry entry = deletionsInProgress.getEntries().get(0); + deleteSnapshotFromRepository(entry.getSnapshot(), null, entry.getRepositoryStateId()); + } + } + } + /** * Cleans up shard snapshots that were running on removed nodes * @@ -667,7 +697,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public void onFailure(Exception e) { logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshot()); } - }); + }, updatedSnapshot.getRepositoryStateId(), false); } else if (snapshot.state() == State.SUCCESS && newMaster) { // Finalize the snapshot endSnapshot(snapshot); @@ -875,7 +905,14 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, status.reason())); } } - SnapshotInfo snapshotInfo = repository.finalizeSnapshot(snapshot.getSnapshotId(), entry.indices(), entry.startTime(), failure, entry.shards().size(), Collections.unmodifiableList(shardFailures)); + SnapshotInfo snapshotInfo = repository.finalizeSnapshot( + snapshot.getSnapshotId(), + entry.indices(), + entry.startTime(), + failure, + entry.shards().size(), + Collections.unmodifiableList(shardFailures), + entry.getRepositoryStateId()); removeSnapshotFromClusterState(snapshot, snapshotInfo, null); } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); @@ -904,6 +941,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure, @Nullable ActionListener listener) { clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() { + @Override public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); @@ -961,10 +999,12 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus * @param snapshotName snapshotName * @param listener listener */ - public void deleteSnapshot(final String repositoryName, final String snapshotName, final DeleteSnapshotListener listener) { + public void deleteSnapshot(final String repositoryName, final String snapshotName, final DeleteSnapshotListener listener, + final boolean immediatePriority) { // First, look for the snapshot in the repository final Repository repository = repositoriesService.repository(repositoryName); - Optional matchedEntry = repository.getRepositoryData().getSnapshotIds() + final RepositoryData repositoryData = repository.getRepositoryData(); + Optional matchedEntry = repositoryData.getSnapshotIds() .stream() .filter(s -> s.getName().equals(snapshotName)) .findFirst(); @@ -976,7 +1016,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (matchedEntry.isPresent() == false) { throw new SnapshotMissingException(repositoryName, snapshotName); 
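        // (Comment added for clarity.) Resolving the snapshot name against a single
        // RepositoryData read also captures repositoryData.getGenId() at one point in
        // time; the delete below carries that generation so the repository can fail
        // fast if a concurrent operation wrote a newer index-N file in the meantime.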
} - deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener); + deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repositoryData.getGenId(), immediatePriority); } /** @@ -984,16 +1024,23 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus *
     * <p>
* If the snapshot is still running cancels the snapshot first and then deletes it from the repository. * - * @param snapshot snapshot - * @param listener listener + * @param snapshot snapshot + * @param listener listener + * @param repositoryStateId the unique id for the state of the repository */ - public void deleteSnapshot(final Snapshot snapshot, final DeleteSnapshotListener listener) { - clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask() { + private void deleteSnapshot(final Snapshot snapshot, final DeleteSnapshotListener listener, final long repositoryStateId, + final boolean immediatePriority) { + Priority priority = immediatePriority ? Priority.IMMEDIATE : Priority.NORMAL; + clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask(priority) { boolean waitForSnapshot = false; @Override public ClusterState execute(ClusterState currentState) throws Exception { + SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted"); + } RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); if (restoreInProgress != null) { // don't allow snapshot deletions while a restore is taking place, @@ -1003,19 +1050,27 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete snapshot during a restore"); } } + ClusterState.Builder clusterStateBuilder = ClusterState.builder(currentState); SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - if (snapshots == null) { - // No snapshots running - we can continue - return currentState; - } - SnapshotsInProgress.Entry snapshotEntry = snapshots.snapshot(snapshot); + SnapshotsInProgress.Entry snapshotEntry = snapshots != null ? 
snapshots.snapshot(snapshot) : null; if (snapshotEntry == null) { - // This snapshot is not running - continue - if (!snapshots.entries().isEmpty()) { + // This snapshot is not running - delete + if (snapshots != null && !snapshots.entries().isEmpty()) { // However other snapshots are running - cannot continue throw new ConcurrentSnapshotExecutionException(snapshot, "another snapshot is currently running cannot delete"); } - return currentState; + // add the snapshot deletion to the cluster state + SnapshotDeletionsInProgress.Entry entry = new SnapshotDeletionsInProgress.Entry( + snapshot, + System.currentTimeMillis(), + repositoryStateId + ); + if (deletionsInProgress != null) { + deletionsInProgress = deletionsInProgress.withAddedEntry(entry); + } else { + deletionsInProgress = SnapshotDeletionsInProgress.newInstance(entry); + } + clusterStateBuilder.putCustom(SnapshotDeletionsInProgress.TYPE, deletionsInProgress); } else { // This snapshot is currently running - stopping shards first waitForSnapshot = true; @@ -1060,8 +1115,9 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } SnapshotsInProgress.Entry newSnapshot = new SnapshotsInProgress.Entry(snapshotEntry, State.ABORTED, shards); snapshots = new SnapshotsInProgress(newSnapshot); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); + clusterStateBuilder.putCustom(SnapshotsInProgress.TYPE, snapshots); } + return clusterStateBuilder.build(); } @Override @@ -1079,7 +1135,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (completedSnapshot.equals(snapshot)) { logger.trace("deleted snapshot completed - deleting files"); removeListener(this); - deleteSnapshotFromRepository(snapshot, listener); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> + deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), + listener, true) + ); } } @@ -1088,13 +1147,15 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (failedSnapshot.equals(snapshot)) { logger.trace("deleted snapshot failed - deleting files", e); removeListener(this); - deleteSnapshotFromRepository(snapshot, listener); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> + deleteSnapshot(failedSnapshot.getRepository(), failedSnapshot.getSnapshotId().getName(), listener, true) + ); } } }); } else { logger.trace("deleted snapshot is not running - deleting files"); - deleteSnapshotFromRepository(snapshot, listener); + deleteSnapshotFromRepository(snapshot, listener, repositoryStateId); } } }); @@ -1116,6 +1177,14 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } } } + SnapshotDeletionsInProgress deletionsInProgress = clusterState.custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null) { + for (SnapshotDeletionsInProgress.Entry entry : deletionsInProgress.getEntries()) { + if (entry.getSnapshot().getRepository().equals(repository)) { + return true; + } + } + } return false; } @@ -1124,15 +1193,62 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus * * @param snapshot snapshot * @param listener listener + * @param repositoryStateId the unique id representing the state of the repository at the time the deletion began */ - private void deleteSnapshotFromRepository(final Snapshot snapshot, final DeleteSnapshotListener listener) { + private void deleteSnapshotFromRepository(final Snapshot snapshot, 
@Nullable final DeleteSnapshotListener listener, + long repositoryStateId) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { try { Repository repository = repositoriesService.repository(snapshot.getRepository()); - repository.deleteSnapshot(snapshot.getSnapshotId()); - listener.onResponse(); - } catch (Exception t) { - listener.onFailure(t); + repository.deleteSnapshot(snapshot.getSnapshotId(), repositoryStateId); + removeSnapshotDeletionFromClusterState(snapshot, null, listener); + } catch (Exception ex) { + removeSnapshotDeletionFromClusterState(snapshot, ex, listener); + } + }); + } + + /** + * Removes the snapshot deletion from {@link SnapshotDeletionsInProgress} in the cluster state. + */ + private void removeSnapshotDeletionFromClusterState(final Snapshot snapshot, @Nullable final Exception failure, + @Nullable final DeleteSnapshotListener listener) { + clusterService.submitStateUpdateTask("remove snapshot deletion metadata", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + SnapshotDeletionsInProgress deletions = currentState.custom(SnapshotDeletionsInProgress.TYPE); + if (deletions != null) { + boolean changed = false; + if (deletions.hasDeletionsInProgress()) { + assert deletions.getEntries().size() == 1 : "should have exactly one deletion in progress"; + SnapshotDeletionsInProgress.Entry entry = deletions.getEntries().get(0); + deletions = deletions.withRemovedEntry(entry); + changed = true; + } + if (changed) { + return ClusterState.builder(currentState).putCustom(SnapshotDeletionsInProgress.TYPE, deletions).build(); + } + } + return currentState; + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); + if (listener != null) { + listener.onFailure(e); + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (listener != null) { + if (failure != null) { + listener.onFailure(failure); + } else { + listener.onResponse(); + } + } } }); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 9d44dbbca38..9fdbf13fc8c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -679,6 +679,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase { SnapshotsInProgress.State.fromValue((byte) randomIntBetween(0, 6)), Collections.emptyList(), Math.abs(randomLong()), + (long) randomIntBetween(0, 1000), ImmutableOpenMap.of())); case 1: return new RestoreInProgress(new RestoreInProgress.Entry( diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java index a48f3ae3e10..0ad36713810 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexServiceTests.java @@ -59,7 +59,7 @@ public class MetaDataDeleteIndexServiceTests extends ESTestCase { Snapshot snapshot = new Snapshot("doesn't matter", new SnapshotId("snapshot name", "snapshot uuid")); SnapshotsInProgress snaps = new SnapshotsInProgress(new SnapshotsInProgress.Entry(snapshot, true, false, 
SnapshotsInProgress.State.INIT, singletonList(new IndexId(index, "doesn't matter")), - System.currentTimeMillis(), ImmutableOpenMap.of())); + System.currentTimeMillis(), (long) randomIntBetween(0, 1000), ImmutableOpenMap.of())); ClusterState state = ClusterState.builder(clusterState(index)) .putCustom(SnapshotsInProgress.TYPE, snaps) .build(); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 023dd49f2f0..d01aeb2bf00 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1490,7 +1490,7 @@ public class IndexShardTests extends IndexShardTestCase { public RepositoryData getRepositoryData() { Map> map = new HashMap<>(); map.put(new IndexId(indexName, "blah"), emptySet()); - return new RepositoryData(Collections.emptyList(), map); + return RepositoryData.initRepositoryData(Collections.emptyList(), map); } @Override @@ -1498,12 +1498,13 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures) { + public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, + List shardFailures, long repositoryStateId) { return null; } @Override - public void deleteSnapshot(SnapshotId snapshotId) { + public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { } @Override diff --git a/core/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/core/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index c1ac1abfdb5..97d415fe4f9 100644 --- a/core/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/core/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -56,7 +56,10 @@ public class RepositoryDataTests extends ESTestCase { XContentBuilder builder = JsonXContent.contentBuilder(); repositoryData.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); - assertEquals(repositoryData, RepositoryData.fromXContent(parser)); + long gen = (long) randomIntBetween(0, 500); + RepositoryData fromXContent = RepositoryData.fromXContent(parser, gen); + assertEquals(repositoryData, fromXContent); + assertEquals(gen, fromXContent.getGenId()); } public void testAddSnapshots() { @@ -64,8 +67,6 @@ public class RepositoryDataTests extends ESTestCase { // test that adding the same snapshot id to the repository data throws an exception final SnapshotId snapshotId = repositoryData.getSnapshotIds().get(0); Map indexIdMap = repositoryData.getIndices(); - expectThrows(IllegalArgumentException.class, - () -> repositoryData.addSnapshot(new SnapshotId(snapshotId.getName(), snapshotId.getUUID()), Collections.emptyList())); // test that adding a snapshot and its indices works SnapshotId newSnapshot = new SnapshotId(randomAsciiOfLength(7), UUIDs.randomBase64UUID()); List indices = new ArrayList<>(); @@ -91,22 +92,7 @@ public class RepositoryDataTests extends ESTestCase { assertEquals(snapshotIds.size(), 1); // if it was a new index, only the new snapshot should be in its set } } - } - - public void testInitIndices() { - final int numSnapshots = randomIntBetween(1, 30); - final List snapshotIds = new ArrayList<>(numSnapshots); - for (int i = 0; i < 
numSnapshots; i++) { - snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID())); - } - RepositoryData repositoryData = new RepositoryData(snapshotIds, Collections.emptyMap()); - // test that initializing indices works - Map> indices = randomIndices(snapshotIds); - RepositoryData newRepoData = repositoryData.initIndices(indices); - assertEquals(repositoryData.getSnapshotIds(), newRepoData.getSnapshotIds()); - for (IndexId indexId : indices.keySet()) { - assertEquals(indices.get(indexId), newRepoData.getSnapshots(indexId)); - } + assertEquals(repositoryData.getGenId(), newRepoData.getGenId()); } public void testRemoveSnapshot() { @@ -135,12 +121,8 @@ public class RepositoryDataTests extends ESTestCase { } public static RepositoryData generateRandomRepoData() { - return generateRandomRepoData(new ArrayList<>()); - } - - public static RepositoryData generateRandomRepoData(final List origSnapshotIds) { - List snapshotIds = randomSnapshots(origSnapshotIds); - return new RepositoryData(snapshotIds, randomIndices(snapshotIds)); + List snapshotIds = randomSnapshots(new ArrayList<>()); + return RepositoryData.initRepositoryData(snapshotIds, randomIndices(snapshotIds)); } private static List randomSnapshots(final List origSnapshotIds) { diff --git a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 07e6aa0f16c..f5f036a2359 100644 --- a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -24,16 +24,18 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes import org.elasticsearch.client.Client; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -105,26 +107,22 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase { // write to and read from a index file with no entries assertThat(repository.getSnapshots().size(), equalTo(0)); final RepositoryData emptyData = RepositoryData.EMPTY; - repository.writeIndexGen(emptyData); - final RepositoryData readData = repository.getRepositoryData(); - assertEquals(readData, emptyData); - assertEquals(readData.getIndices().size(), 0); - assertEquals(readData.getSnapshotIds().size(), 0); + repository.writeIndexGen(emptyData, emptyData.getGenId()); + RepositoryData repoData = repository.getRepositoryData(); + assertEquals(repoData, emptyData); + assertEquals(repoData.getIndices().size(), 0); + assertEquals(repoData.getSnapshotIds().size(), 0); + assertEquals(0L, repoData.getGenId()); // write to and read from an index file with snapshots but no indices - final int numSnapshots = randomIntBetween(1, 20); - final List snapshotIds = new ArrayList<>(numSnapshots); - for (int i = 0; i < numSnapshots; i++) { - snapshotIds.add(new 
SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID())); - } - RepositoryData repositoryData = new RepositoryData(snapshotIds, Collections.emptyMap()); - repository.writeIndexGen(repositoryData); - assertEquals(repository.getRepositoryData(), repositoryData); + repoData = addRandomSnapshotsToRepoData(repoData, false); + repository.writeIndexGen(repoData, repoData.getGenId()); + assertEquals(repoData, repository.getRepositoryData()); // write to and read from a index file with random repository data - repositoryData = generateRandomRepoData(); - repository.writeIndexGen(repositoryData); - assertThat(repository.getRepositoryData(), equalTo(repositoryData)); + repoData = addRandomSnapshotsToRepoData(repository.getRepositoryData(), true); + repository.writeIndexGen(repoData, repoData.getGenId()); + assertEquals(repoData, repository.getRepositoryData()); } public void testIndexGenerationalFiles() throws Exception { @@ -132,26 +130,38 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase { // write to index generational file RepositoryData repositoryData = generateRandomRepoData(); - repository.writeIndexGen(repositoryData); + repository.writeIndexGen(repositoryData, repositoryData.getGenId()); assertThat(repository.getRepositoryData(), equalTo(repositoryData)); assertThat(repository.latestIndexBlobId(), equalTo(0L)); assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(0L)); // adding more and writing to a new index generational file - repositoryData = generateRandomRepoData(); - repository.writeIndexGen(repositoryData); + repositoryData = addRandomSnapshotsToRepoData(repository.getRepositoryData(), true); + repository.writeIndexGen(repositoryData, repositoryData.getGenId()); assertEquals(repository.getRepositoryData(), repositoryData); assertThat(repository.latestIndexBlobId(), equalTo(1L)); assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(1L)); // removing a snapshot and writing to a new index generational file - repositoryData = repositoryData.removeSnapshot(repositoryData.getSnapshotIds().get(0)); - repository.writeIndexGen(repositoryData); + repositoryData = repository.getRepositoryData().removeSnapshot(repositoryData.getSnapshotIds().get(0)); + repository.writeIndexGen(repositoryData, repositoryData.getGenId()); assertEquals(repository.getRepositoryData(), repositoryData); assertThat(repository.latestIndexBlobId(), equalTo(2L)); assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(2L)); } + public void testRepositoryDataConcurrentModificationNotAllowed() throws IOException { + final BlobStoreRepository repository = setupRepo(); + + // write to index generational file + RepositoryData repositoryData = generateRandomRepoData(); + repository.writeIndexGen(repositoryData, repositoryData.getGenId()); + + // write repo data again to index generational file, errors because we already wrote to the + // N+1 generation from which this repository data instance was created + expectThrows(RepositoryException.class, () -> repository.writeIndexGen(repositoryData, repositoryData.getGenId())); + } + private BlobStoreRepository setupRepo() { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); @@ -170,4 +180,18 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase { return repository; } + private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boolean inclIndices) { + int numSnapshots = randomIntBetween(1, 20); + for (int i = 0; i < numSnapshots; i++) { + SnapshotId 
snapshotId = new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()); + int numIndices = inclIndices ? randomIntBetween(0, 20) : 0; + List indexIds = new ArrayList<>(numIndices); + for (int j = 0; j < numIndices; j++) { + indexIds.add(new IndexId(randomAsciiOfLength(8), UUIDs.randomBase64UUID())); + } + repoData = repoData.addSnapshot(snapshotId, indexIds); + } + return repoData; + } + } diff --git a/core/src/test/java/org/elasticsearch/snapshots/MinThreadsSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/MinThreadsSnapshotRestoreIT.java new file mode 100644 index 00000000000..d1759d83e34 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/snapshots/MinThreadsSnapshotRestoreIT.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.action.ListenableActionFuture; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.snapshots.mockstore.MockRepository; + +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; + +/** + * Tests for snapshot/restore that require at least 2 threads available + * in the thread pool (for example, tests that use the mock repository that + * block on master). 
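+ * A typical flow in these tests (a sketch of the pattern, using the helpers visible
+ * below, not a verbatim excerpt): block the repository on the elected master, kick
+ * off an async snapshot or delete, assert that a concurrent operation fails with a
+ * ConcurrentSnapshotExecutionException, then unblock the node and wait for the
+ * first operation to complete.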
+ */ +public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put("thread_pool.snapshot.core", 2) + .put("thread_pool.snapshot.max", 2) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(MockRepository.Plugin.class); + } + + public void testConcurrentSnapshotDeletionsNotAllowed() throws Exception { + logger.info("--> creating repository"); + final String repo = "test-repo"; + assertAcked(client().admin().cluster().preparePutRepository(repo).setType("mock").setSettings( + Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAsciiOfLength(10)) + .put("wait_after_unblock", 200)).get()); + + logger.info("--> snapshot twice"); + final String index = "test-idx1"; + assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + for (int i = 0; i < 10; i++) { + index(index, "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + final String snapshot1 = "test-snap1"; + client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + final String index2 = "test-idx2"; + assertAcked(prepareCreate(index2, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + for (int i = 0; i < 10; i++) { + index(index2, "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + final String snapshot2 = "test-snap2"; + client().admin().cluster().prepareCreateSnapshot(repo, snapshot2).setWaitForCompletion(true).get(); + + String blockedNode = internalCluster().getMasterName(); + ((MockRepository)internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(true); + logger.info("--> start deletion of first snapshot"); + ListenableActionFuture future = + client().admin().cluster().prepareDeleteSnapshot(repo, snapshot2).execute(); + logger.info("--> waiting for block to kick in on node [{}]", blockedNode); + waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10)); + + logger.info("--> try deleting the second snapshot, should fail because the first deletion is in progress"); + try { + client().admin().cluster().prepareDeleteSnapshot(repo, snapshot1).get(); + fail("should not be able to delete snapshots concurrently"); + } catch (ConcurrentSnapshotExecutionException e) { + assertThat(e.getMessage(), containsString("cannot delete - another snapshot is currently being deleted")); + } + + logger.info("--> unblocking blocked node [{}]", blockedNode); + unblockNode(repo, blockedNode); + + logger.info("--> wait until first snapshot is finished"); + assertAcked(future.actionGet()); + + logger.info("--> delete second snapshot, which should now work"); + client().admin().cluster().prepareDeleteSnapshot(repo, snapshot1).get(); + assertTrue(client().admin().cluster().prepareGetSnapshots(repo).setSnapshots("_all").get().getSnapshots().isEmpty()); + } + + public void testSnapshottingWithInProgressDeletionNotAllowed() throws Exception { + logger.info("--> creating repository"); + final String repo = "test-repo"; + assertAcked(client().admin().cluster().preparePutRepository(repo).setType("mock").setSettings( + Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAsciiOfLength(10)) + .put("wait_after_unblock", 200)).get()); + + logger.info("--> snapshot"); + final String index = "test-idx"; + 
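+        // Test flow (comments added for clarity): index a few docs and take a first
+        // snapshot, block the repository on the master so the subsequent delete hangs,
+        // then verify that creating a second snapshot while that deletion is still in
+        // progress fails with ConcurrentSnapshotExecutionException.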
assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + for (int i = 0; i < 10; i++) { + index(index, "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + final String snapshot1 = "test-snap1"; + client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + + String blockedNode = internalCluster().getMasterName(); + ((MockRepository)internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(true); + logger.info("--> start deletion of snapshot"); + ListenableActionFuture future = + client().admin().cluster().prepareDeleteSnapshot(repo, snapshot1).execute(); + logger.info("--> waiting for block to kick in on node [{}]", blockedNode); + waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10)); + + logger.info("--> try creating a second snapshot, should fail because the deletion is in progress"); + final String snapshot2 = "test-snap2"; + try { + client().admin().cluster().prepareCreateSnapshot(repo, snapshot2).setWaitForCompletion(true).get(); + fail("should not be able to create a snapshot while another is being deleted"); + } catch (ConcurrentSnapshotExecutionException e) { + assertThat(e.getMessage(), containsString("cannot snapshot while a snapshot deletion is in-progress")); + } + + logger.info("--> unblocking blocked node [{}]", blockedNode); + unblockNode(repo, blockedNode); + + logger.info("--> wait until snapshot deletion is finished"); + assertAcked(future.actionGet()); + + logger.info("--> creating second snapshot, which should now work"); + client().admin().cluster().prepareCreateSnapshot(repo, snapshot2).setWaitForCompletion(true).get(); + assertEquals(1, client().admin().cluster().prepareGetSnapshots(repo).setSnapshots("_all").get().getSnapshots().size()); + } + + public void testRestoreWithInProgressDeletionsNotAllowed() throws Exception { + logger.info("--> creating repository"); + final String repo = "test-repo"; + assertAcked(client().admin().cluster().preparePutRepository(repo).setType("mock").setSettings( + Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAsciiOfLength(10)) + .put("wait_after_unblock", 200)).get()); + + logger.info("--> snapshot"); + final String index = "test-idx"; + assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + for (int i = 0; i < 10; i++) { + index(index, "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + final String snapshot1 = "test-snap1"; + client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + final String index2 = "test-idx2"; + assertAcked(prepareCreate(index2, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + for (int i = 0; i < 10; i++) { + index(index2, "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + final String snapshot2 = "test-snap2"; + client().admin().cluster().prepareCreateSnapshot(repo, snapshot2).setWaitForCompletion(true).get(); + client().admin().indices().prepareClose(index, index2).get(); + + String blockedNode = internalCluster().getMasterName(); + ((MockRepository)internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(true); + logger.info("--> start deletion of snapshot"); + ListenableActionFuture future = + client().admin().cluster().prepareDeleteSnapshot(repo, snapshot2).execute(); + logger.info("--> 
waiting for block to kick in on node [{}]", blockedNode); + waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10)); + + logger.info("--> try restoring the other snapshot, should fail because the deletion is in progress"); + try { + client().admin().cluster().prepareRestoreSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + fail("should not be able to restore a snapshot while another is being deleted"); + } catch (ConcurrentSnapshotExecutionException e) { + assertThat(e.getMessage(), containsString("cannot restore a snapshot while a snapshot deletion is in-progress")); + } + + logger.info("--> unblocking blocked node [{}]", blockedNode); + unblockNode(repo, blockedNode); + + logger.info("--> wait until snapshot deletion is finished"); + assertAcked(future.actionGet()); + + logger.info("--> restoring snapshot, which should now work"); + client().admin().cluster().prepareRestoreSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + assertEquals(1, client().admin().cluster().prepareGetSnapshots(repo).setSnapshots("_all").get().getSnapshots().size()); + } +} diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index dda634023b6..ef6275e2ccd 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2277,6 +2277,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas State.ABORTED, Collections.singletonList(indexId), System.currentTimeMillis(), + repositoryData.getGenId(), shards.build())); return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))).build(); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yaml index 43db8e5206f..b12b9d09b6f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yaml @@ -30,3 +30,10 @@ setup: - match: { snapshot.state : SUCCESS } - match: { snapshot.shards.successful: 1 } - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.delete: + repository: test_repo_create_1 + snapshot: test_snapshot + + - match: { acknowledged: true } From 1cb5dc42ff0fd17fa13bc2ad9122e79a7a60cb47 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Sun, 25 Dec 2016 19:27:34 -0500 Subject: [PATCH 022/119] Updates SnapshotDeletionsInProgress version number introduced to 5.2.0 --- .../org/elasticsearch/cluster/SnapshotDeletionsInProgress.java | 2 +- .../java/org/elasticsearch/cluster/SnapshotsInProgress.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index f6257fd7a92..a0c006f9c71 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -41,7 +41,7 @@ public class SnapshotDeletionsInProgress extends AbstractDiffable implem public static final String TYPE = "snapshot_deletions"; public static final SnapshotDeletionsInProgress PROTO = new 
SnapshotDeletionsInProgress(Collections.emptyList()); // the version where SnapshotDeletionsInProgress was introduced - public static final Version VERSION_INTRODUCED = Version.V_6_0_0_alpha1_UNRELEASED; + public static final Version VERSION_INTRODUCED = Version.V_5_2_0_UNRELEASED; // the list of snapshot deletion request entries private final List entries; diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 26ddbec7a2a..b5d955ecc06 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -53,7 +53,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus // a snapshot in progress from a pre 5.2.x node public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L; // the version where repository state ids were introduced - private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_6_0_0_alpha1_UNRELEASED; + private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0_UNRELEASED; @Override public boolean equals(Object o) { From d89757b84845cea8dca598c8fd66c5453f0525ca Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 26 Dec 2016 10:34:50 +0100 Subject: [PATCH 023/119] Fix mutate function to always actually modify the failure object. --- .../replication/ReplicationResponseTests.java | 23 +++++++++++++++---- .../test/EqualsHashCodeTestUtils.java | 3 ++- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java index ca8f0a4c6b2..d3b8975ac78 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -34,8 +34,11 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.function.Supplier; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; @@ -81,11 +84,15 @@ public class ReplicationResponseTests extends ESTestCase { List> mutations = new ArrayList<>(); final Index index = failure.fullShardId().getIndex(); - final ShardId randomIndex = new ShardId(randomUnicodeOfCodepointLength(5), index.getUUID(), failure.shardId()); + final Set indexNamePool = new HashSet<>(Arrays.asList(randomUnicodeOfCodepointLength(5), randomUnicodeOfCodepointLength(6))); + indexNamePool.remove(index.getName()); + final ShardId randomIndex = new ShardId(randomFrom(indexNamePool), index.getUUID(), failure.shardId()); mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(randomIndex, failure.nodeId(), (Exception) failure.getCause(), failure.status(), failure.primary())); - final ShardId randomUUID = new ShardId(index.getName(), randomUnicodeOfCodepointLength(5), failure.shardId()); + final Set uuidPool = new HashSet<>(Arrays.asList(randomUnicodeOfCodepointLength(5), randomUnicodeOfCodepointLength(6))); + uuidPool.remove(index.getUUID()); + final ShardId randomUUID = new ShardId(index.getName(), randomFrom(uuidPool), failure.shardId()); mutations.add(() -> new 
ReplicationResponse.ShardInfo.Failure(randomUUID, failure.nodeId(), (Exception) failure.getCause(), failure.status(), failure.primary())); @@ -93,15 +100,21 @@ public class ReplicationResponseTests extends ESTestCase { mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(randomShardId, failure.nodeId(), (Exception) failure.getCause(), failure.status(), failure.primary())); - final String randomNode = randomUnicodeOfLength(3); + final Set nodeIdPool = new HashSet<>(Arrays.asList(randomUnicodeOfLength(3), randomUnicodeOfLength(4))); + nodeIdPool.remove(failure.nodeId()); + final String randomNode = randomFrom(nodeIdPool); mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(failure.fullShardId(), randomNode, (Exception) failure.getCause(), failure.status(), failure.primary())); - final Exception randomException = randomFrom(new IllegalStateException("a"), new IllegalArgumentException("b")); + final Set exceptionPool = new HashSet<>(Arrays.asList(new IllegalStateException("a"), new IllegalArgumentException("b"))); + exceptionPool.remove(failure.getCause()); + final Exception randomException = randomFrom(exceptionPool); mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(failure.fullShardId(), failure.nodeId(), randomException, failure.status(), failure.primary())); - final RestStatus randomStatus = randomFrom(RestStatus.values()); + final Set otherStatuses = new HashSet<>(Arrays.asList(RestStatus.values())); + otherStatuses.remove(failure.status()); + final RestStatus randomStatus = randomFrom(otherStatuses); mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(failure.fullShardId(), failure.nodeId(), (Exception) failure.getCause(), randomStatus, failure.primary())); diff --git a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java index bf1cd8132da..76cfcce033b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java @@ -78,7 +78,8 @@ public class EqualsHashCodeTestUtils { assertThat(objectName + " hashcode returns different values if called multiple times", original.hashCode(), equalTo(original.hashCode())); if (mutationFunction != null) { - assertThat(objectName + " mutation should not be equal to original", mutationFunction.mutate(original), + T mutation = mutationFunction.mutate(original); + assertThat(objectName + " mutation should not be equal to original", mutation, not(equalTo(original))); } From f80165c374acc12a0dc594cdbf9de737412dc1aa Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 26 Dec 2016 11:22:09 +0100 Subject: [PATCH 024/119] Fix LineLength issues. 
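
This wraps the candidate-pool initializations added in the previous commit so they fit
within the line-length limit. For context, the pattern those lines implement is worth
spelling out: when mutating an object for an equals/hashCode test, drawing a replacement
value purely at random can occasionally reproduce the original value, so the "mutation"
compares equal and the test fails spuriously. Building a pool of all candidates and
removing the current value first guarantees a real change. A self-contained sketch of
that pattern (generic illustration with a made-up Status enum, not code from this patch):

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;

public class MutationPoolExample {
    enum Status { OK, CREATED, CONFLICT }

    // Pick a replacement for `current` that is guaranteed to differ: enumerate
    // all candidates, remove the current value, then choose from what is left.
    static Status mutate(Status current, Random random) {
        EnumSet<Status> pool = EnumSet.allOf(Status.class);
        pool.remove(current);
        List<Status> candidates = new ArrayList<>(pool);
        return candidates.get(random.nextInt(candidates.size()));
    }

    public static void main(String[] args) {
        Status original = Status.OK;
        System.out.println(mutate(original, new Random())); // never prints OK
    }
}
--------------------------------------------------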
--- .../support/replication/ReplicationResponseTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java index d3b8975ac78..23c0d714be7 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -84,7 +84,8 @@ public class ReplicationResponseTests extends ESTestCase { List> mutations = new ArrayList<>(); final Index index = failure.fullShardId().getIndex(); - final Set indexNamePool = new HashSet<>(Arrays.asList(randomUnicodeOfCodepointLength(5), randomUnicodeOfCodepointLength(6))); + final Set indexNamePool = new HashSet<>(Arrays.asList( + randomUnicodeOfCodepointLength(5), randomUnicodeOfCodepointLength(6))); indexNamePool.remove(index.getName()); final ShardId randomIndex = new ShardId(randomFrom(indexNamePool), index.getUUID(), failure.shardId()); mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(randomIndex, failure.nodeId(), (Exception) failure.getCause(), @@ -106,7 +107,8 @@ public class ReplicationResponseTests extends ESTestCase { mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(failure.fullShardId(), randomNode, (Exception) failure.getCause(), failure.status(), failure.primary())); - final Set exceptionPool = new HashSet<>(Arrays.asList(new IllegalStateException("a"), new IllegalArgumentException("b"))); + final Set exceptionPool = new HashSet<>(Arrays.asList( + new IllegalStateException("a"), new IllegalArgumentException("b"))); exceptionPool.remove(failure.getCause()); final Exception randomException = randomFrom(exceptionPool); mutations.add(() -> new ReplicationResponse.ShardInfo.Failure(failure.fullShardId(), failure.nodeId(), randomException, From 2d81750a13c84ef060ab46899793ed4e2b3ad522 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 26 Dec 2016 14:55:22 +0100 Subject: [PATCH 025/119] Make ESTestCase resilient to initialization errors. 
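
The underlying issue: JUnit runs @After methods even when a @Before method throws, so
tear-down code cannot assume that set-up completed. Guarding on the field that set-up
would have initialized, rather than re-evaluating the condition that guarded set-up,
makes tear-down safe in both cases. A minimal sketch of the pattern (hypothetical test
class, not part of this patch):

[source,java]
--------------------------------------------------
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class SetupFailureExampleTests {
    private AutoCloseable resource; // stays null if before() throws early

    @Before
    public void before() throws Exception {
        checkPreconditions();     // if this throws, the next line never runs
        resource = acquireResource();
    }

    @After
    public void after() throws Exception {
        // Guard on the field itself: before() may have failed before assigning it.
        if (resource != null) {
            resource.close();
            resource = null;
        }
    }

    @Test
    public void testSomething() { /* ... */ }

    private void checkPreconditions() { /* may throw */ }

    private AutoCloseable acquireResource() { return () -> {}; }
}
--------------------------------------------------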
--- .../elasticsearch/common/logging/DeprecationLogger.java | 3 ++- .../src/main/java/org/elasticsearch/test/ESTestCase.java | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 55f89ce84ad..39b882f1a1d 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.util.concurrent.ThreadContext; import java.util.Iterator; +import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; @@ -63,7 +64,7 @@ public class DeprecationLogger { * @throws IllegalStateException if this {@code threadContext} has already been set */ public static void setThreadContext(ThreadContext threadContext) { - assert threadContext != null; + Objects.requireNonNull(threadContext, "Cannot register a null ThreadContext"); // add returning false means it _did_ have it already if (THREAD_CONTEXT.add(threadContext) == false) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 567c987ebf0..ca4f846fe39 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -255,6 +255,7 @@ public abstract class ESTestCase extends LuceneTestCase { @Before public final void before() { logger.info("[{}]: before test", getTestName()); + assertNull("Thread context initialized twice", threadContext); if (enableWarningsCheck()) { this.threadContext = new ThreadContext(Settings.EMPTY); DeprecationLogger.setThreadContext(threadContext); @@ -272,8 +273,13 @@ public abstract class ESTestCase extends LuceneTestCase { @After public final void after() throws Exception { checkStaticState(); - if (enableWarningsCheck()) { + // We check threadContext != null rather than enableWarningsCheck() + // because after methods are still called in the event that before + // methods failed, in which case threadContext might not have been + // initialized + if (threadContext != null) { ensureNoWarnings(); + threadContext = null; } ensureAllSearchContextsReleased(); ensureCheckIndexPassed(); From 2127db27a32320249e2b9187e9359b86b70de92b Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 26 Dec 2016 16:05:27 +0100 Subject: [PATCH 026/119] Add trace logging to CircuitBreakerServiceIT.testParentChecking. 
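
For reference, the @TestLogging annotation takes a comma-separated list of logger:level
pairs, with `_root` addressing the root logger, and the raised levels apply only while
the annotated test runs. A sketch of typical usage (hypothetical test, mirroring the
annotation added below):

[source,java]
--------------------------------------------------
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;

public class ExampleIT extends ESIntegTestCase {
    // Root logger at DEBUG plus TRACE for the search action package,
    // scoped to this single test method.
    @TestLogging("_root:DEBUG,org.elasticsearch.action.search:TRACE")
    public void testWithVerboseLogging() throws Exception {
        // ... test body that benefits from the extra logging ...
    }
}
--------------------------------------------------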
---
 .../indices/memory/breaker/CircuitBreakerServiceIT.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
index ed371eca895..63517dbdc97 100644
--- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
@@ -48,6 +48,7 @@ import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.junit.After;
 import org.junit.Before;

@@ -217,6 +218,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
      * Test that a breaker correctly redistributes to a different breaker, in
      * this case, the fielddata breaker borrows space from the request breaker
      */
+    @TestLogging("_root:DEBUG,org.elasticsearch.action.search:TRACE")
     public void testParentChecking() throws Exception {
         if (noopBreakerUsed()) {
             logger.info("--> noop breakers used, skipping test");

From 3cb164b22ebf225e4f0f6dea0114fe82f2a5377e Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Mon, 26 Dec 2016 20:19:11 +0100
Subject: [PATCH 027/119] Fix IndexShardTests.testDocStats.

---
 .../java/org/elasticsearch/index/shard/IndexShardTests.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index d01aeb2bf00..972232debaa 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -1368,7 +1368,9 @@ public class IndexShardTests extends IndexShardTestCase {
         try {
             indexShard = newStartedShard();
             final long numDocs = randomIntBetween(2, 32); // at least two documents so we have docs to delete
-            final long numDocsToDelete = randomIntBetween(1, Math.toIntExact(numDocs));
+            // Delete at least numDocs/10 documents otherwise the number of deleted docs will be below 10%
+            // and forceMerge will refuse to expunge deletes
+            final long numDocsToDelete = randomIntBetween((int) Math.ceil(numDocs / 10.0), Math.toIntExact(numDocs));
             for (int i = 0; i < numDocs; i++) {
                 final String id = Integer.toString(i);
                 final ParsedDocument doc =

From e7444f7d779db8346c1053d622d21b09d5dd15b3 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Tue, 27 Dec 2016 09:23:22 +0100
Subject: [PATCH 028/119] Fix scaled_float numeric type in aggregations
 (#22351)

`scaled_float` should be used as DOUBLE in aggregations but is currently
used as LONG. This change fixes the issue and adds a simple integration
test for it.
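
To make the distinction concrete: a `scaled_float` multiplies each value by its
`scaling_factor` and stores the rounded result as a long, and readers are expected to
divide again on the way out. A schematic illustration of the round trip (plain
arithmetic, not the mapper code itself):

[source,java]
--------------------------------------------------
public class ScaledFloatExample {
    public static void main(String[] args) {
        double scalingFactor = 100;                          // from the mapping
        double original = 9.99;
        long stored = Math.round(original * scalingFactor);  // 999 on disk
        double restored = stored / scalingFactor;            // 9.99 for readers
        // Exposing the field's numeric type as LONG would surface 999 in a
        // terms aggregation; declaring it DOUBLE yields 9.99 instead.
        System.out.println(stored + " -> " + restored);
    }
}
--------------------------------------------------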
Fixes #22350 --- .../index/mapper/ScaledFloatFieldMapper.java | 5 +- .../mapper/ScaledFloatFieldTypeTests.java | 1 + .../test/search.aggregation/20_terms.yaml | 49 +++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index c1ac00326a7..6cd69c0fc6b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -504,7 +504,10 @@ public class ScaledFloatFieldMapper extends FieldMapper { @Override public NumericType getNumericType() { - return scaledFieldData.getNumericType(); + /** + * {@link ScaledFloatLeafFieldData#getDoubleValues()} transforms the raw long values in `scaled` floats. + */ + return NumericType.DOUBLE; } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java index fd390c5da8a..1ba58fa2ded 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -182,6 +182,7 @@ public class ScaledFloatFieldTypeTests extends FieldTypeTestCase { // single-valued ft.setName("scaled_float1"); IndexNumericFieldData fielddata = (IndexNumericFieldData) ft.fielddataBuilder().build(indexSettings, ft, null, null, null); + assertEquals(fielddata.getNumericType(), IndexNumericFieldData.NumericType.DOUBLE); AtomicNumericFieldData leafFieldData = fielddata.load(reader.leaves().get(0)); SortedNumericDoubleValues values = leafFieldData.getDoubleValues(); values.setDocument(0); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index 769865bd0e3..b1b9baf8ba8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -18,6 +18,9 @@ setup: type: long double: type: double + scaled_float: + type: scaled_float + scaling_factor: 100 date: type: date @@ -282,6 +285,52 @@ setup: - match: { aggregations.double_terms.buckets.1.doc_count: 1 } +--- +"Scaled float test": + - do: + index: + index: test_1 + type: test + id: 1 + body: { "scaled_float": 9.99 } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { "scaled_float": 9.994 } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { "scaled_float": 8.99 } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "scaled_float_terms" : { "terms" : { "field" : "scaled_float" } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.scaled_float_terms.buckets: 2 } + + - match: { aggregations.scaled_float_terms.buckets.0.key: 9.99 } + + - is_false: aggregations.scaled_float_terms.buckets.0.key_as_string + + - match: { aggregations.scaled_float_terms.buckets.0.doc_count: 2 } + + - match: { aggregations.scaled_float_terms.buckets.1.key: 8.99 } + + - is_false: aggregations.scaled_float_terms.buckets.1.key_as_string + + - match: { aggregations.scaled_float_terms.buckets.1.doc_count: 1 } + --- "Date test": - do: From e6fb3a5d950c52ccba8cf1f0b0d6e819e70c243c Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 27 Dec 
2016 10:57:11 -0800 Subject: [PATCH 029/119] fix index out of bounds error in KV Processor (#22288) - checks for index-out-of-bounds - added unit tests for failed `field_split` and `value_split` scenarios missed this test in #22272. --- .../ingest/common/KeyValueProcessor.java | 8 +++++++- .../ingest/common/KeyValueProcessorTests.java | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java index d1f6eb7caf9..3132439cd7d 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java @@ -98,7 +98,13 @@ public final class KeyValueProcessor extends AbstractProcessor { String fieldPathPrefix = (targetField == null) ? "" : targetField + "."; Arrays.stream(oldVal.split(fieldSplit)) - .map((f) -> f.split(valueSplit, 2)) + .map((f) -> { + String[] kv = f.split(valueSplit, 2); + if (kv.length != 2) { + throw new IllegalArgumentException("field [" + field + "] does not contain value_split [" + valueSplit + "]"); + } + return kv; + }) .filter((p) -> includeKeys == null || includeKeys.contains(p[0])) .forEach((p) -> append(document, fieldPathPrefix + p[0], p[1])); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java index 2d5f71bf54e..f5db4be1435 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java @@ -93,4 +93,20 @@ public class KeyValueProcessorTests extends ESTestCase { processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); } + + public void testFailFieldSplitMatch() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "first=hello|second=world|second=universe"); + Processor processor = new KeyValueProcessor(randomAsciiOfLength(10), fieldName, "&", "=", null, "target", false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("target.first", String.class), equalTo("hello|second=world|second=universe")); + assertFalse(ingestDocument.hasField("target.second")); + } + + public void testFailValueSplitMatch() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("foo", "bar")); + Processor processor = new KeyValueProcessor(randomAsciiOfLength(10), "foo", "&", "=", null, "target", false); + Exception exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(exception.getMessage(), equalTo("field [foo] does not contain value_split [=]")); + } } From 02d4cbfeeadc3d5dcbe16153d5d90dc9ed54134f Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 27 Dec 2016 21:52:27 +0100 Subject: [PATCH 030/119] Fix bwc integ test that tries to perform a term aggs on a scaled_float. This is broken when a node with version prior to 5.2.0 is used with another node > 5.2.0. 
This is because scaled_float fields are considered as longs in version < 5.2.0. This is fixed in 5.2.0 where scaled_float are recognized as doubles. --- .../rest-api-spec/test/search.aggregation/20_terms.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index b1b9baf8ba8..c3f89f21381 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -287,6 +287,10 @@ setup: --- "Scaled float test": + - skip: + version: " - 5.2.0" + reason: scaled_float were considered as longs in aggregations, this was fixed in 5.2.0 + - do: index: index: test_1 From 9ccdd3303d0a6d6bceea7ef5de74bbae5f0340dd Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 27 Dec 2016 21:10:01 +0100 Subject: [PATCH 031/119] percolator: Fix NPE in percolator's 'now' range check for percolator queries with range queries. Closes #22355 --- .../percolator/PercolatorFieldMapper.java | 10 +++- .../PercolatorFieldMapperTests.java | 47 +++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index ecc037089ab..e1511c216ae 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -377,12 +377,18 @@ public class PercolatorFieldMapper extends FieldMapper { RangeQueryBuilder rangeQueryBuilder = (RangeQueryBuilder) queryBuilder; if (rangeQueryBuilder.from() instanceof String) { String from = (String) rangeQueryBuilder.from(); - String to = (String) rangeQueryBuilder.to(); - if (from.contains("now") || to.contains("now")) { + if (from.contains("now")) { throw new IllegalArgumentException("percolator queries containing time range queries based on the " + "current time is unsupported"); } } + if (rangeQueryBuilder.to() instanceof String) { + String to = (String) rangeQueryBuilder.to(); + if (to.contains("now")) { + throw new IllegalArgumentException("percolator queries containing time range queries based on the " + + "current time is unsupported"); + } + } } else if (queryBuilder instanceof HasChildQueryBuilder) { throw new IllegalArgumentException("the [has_child] query is unsupported inside a percolator query"); } else if (queryBuilder instanceof HasParentQueryBuilder) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index e5a4fe18d91..4d94ab3a296 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -442,6 +442,53 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + e = expectThrows(MapperParsingException.class, () -> { + mapperService.documentMapper(typeName).parse("test", typeName, "1", + jsonBuilder().startObject() + .field(fieldName, rangeQuery("date_field").from("now")) + 
.endObject().bytes()); + } + ); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + e = expectThrows(MapperParsingException.class, () -> { + mapperService.documentMapper(typeName).parse("test", typeName, "1", + jsonBuilder().startObject() + .field(fieldName, rangeQuery("date_field").to("now")) + .endObject().bytes()); + } + ); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + } + + // https://github.com/elastic/elasticsearch/issues/22355 + public void testVerifyRangeQueryWithNullBounds() throws Exception { + addQueryMapping(); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> { + mapperService.documentMapper(typeName).parse("test", typeName, "1", + jsonBuilder().startObject() + .field(fieldName, rangeQuery("date_field").from("now").to(null)) + .endObject().bytes()); + } + ); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + e = expectThrows(MapperParsingException.class, () -> { + mapperService.documentMapper(typeName).parse("test", typeName, "1", + jsonBuilder().startObject() + .field(fieldName, rangeQuery("date_field").from(null).to("now")) + .endObject().bytes()); + } + ); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + + // No validation failures: + mapperService.documentMapper(typeName).parse("test", typeName, "1", + jsonBuilder().startObject() + .field(fieldName, rangeQuery("date_field").from("2016-01-01").to(null)) + .endObject().bytes()); + mapperService.documentMapper(typeName).parse("test", typeName, "1", + jsonBuilder().startObject() + .field(fieldName, rangeQuery("date_field").from(null).to("2016-01-01")) + .endObject().bytes()); } public void testUnsupportedQueries() { From dea8cee70fef9410e7ed019a9928d8fff07c5ef6 Mon Sep 17 00:00:00 2001 From: Itamar Syn-Hershko Date: Wed, 28 Dec 2016 19:49:34 +0200 Subject: [PATCH 032/119] Fixing a second "second" reference in docs (#22345) --- docs/reference/setup/sysconfig/swap.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc index 08752e6fd93..19b6f751ee7 100644 --- a/docs/reference/setup/sysconfig/swap.asciidoc +++ b/docs/reference/setup/sysconfig/swap.asciidoc @@ -92,7 +92,7 @@ via `System Properties → Advanced → Performance → Advanced → Virtual mem [[swappiness]] ==== Configure `swappiness` -The second option available on Linux systems is to ensure that the sysctl value +Another option available on Linux systems is to ensure that the sysctl value `vm.swappiness` is set to `1`. This reduces the kernel's tendency to swap and should not lead to swapping under normal circumstances, while still allowing the whole system to swap in emergency conditions. 
From ca90d9ea82c3ec2162a669b199c63ef5103b2fe9 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 27 Dec 2016 22:31:34 -0500 Subject: [PATCH 033/119] Remove PROTO-based custom cluster state components Switches custom cluster state components from PROTO-based de-serialization to named objects based de-serialization --- .../reroute/ClusterRerouteResponse.java | 2 +- .../cluster/state/ClusterStateResponse.java | 2 +- .../indices/alias/get/GetAliasesResponse.java | 2 +- .../admin/indices/get/GetIndexResponse.java | 4 +- .../mapping/get/GetMappingsResponse.java | 2 +- .../get/GetIndexTemplatesResponse.java | 2 +- .../action/ingest/GetPipelineResponse.java | 2 +- .../client/transport/TransportClient.java | 2 + .../cluster/AbstractDiffable.java | 11 +- .../cluster/AbstractNamedDiffable.java | 114 ++++++++++++++ .../elasticsearch/cluster/ClusterModule.java | 59 +++++++ .../elasticsearch/cluster/ClusterState.java | 92 ++++------- .../org/elasticsearch/cluster/Diffable.java | 9 -- .../elasticsearch/cluster/DiffableUtils.java | 29 ++-- .../NamedDiff.java} | 15 +- .../cluster/RestoreInProgress.java | 18 +-- .../cluster/SnapshotDeletionsInProgress.java | 14 +- .../cluster/SnapshotsInProgress.java | 15 +- .../cluster/block/ClusterBlocks.java | 57 +++---- .../cluster/metadata/AliasMetaData.java | 32 ++-- .../cluster/metadata/IndexGraveyard.java | 27 ++-- .../cluster/metadata/IndexMetaData.java | 47 +++--- .../metadata/IndexTemplateMetaData.java | 15 +- .../cluster/metadata/MappingMetaData.java | 17 ++- .../cluster/metadata/MetaData.java | 116 ++++---------- .../metadata/RepositoriesMetaData.java | 29 ++-- .../cluster/metadata/RepositoryMetaData.java | 15 +- .../cluster/node/DiscoveryNodes.java | 13 +- .../cluster/routing/IndexRoutingTable.java | 21 +-- .../cluster/routing/RoutingTable.java | 17 +-- .../NamedWriteableAwareStreamInput.java | 10 +- .../common/io/stream/StreamInput.java | 16 ++ .../common/util/IndexFolderUpgrader.java | 3 +- .../discovery/DiscoveryModule.java | 12 +- .../discovery/zen/MembershipAction.java | 2 +- .../zen/PublishClusterStateAction.java | 10 +- .../discovery/zen/ZenDiscovery.java | 5 + .../elasticsearch/env/NodeEnvironment.java | 3 +- .../gateway/LocalAllocateDangledIndices.java | 2 +- .../gateway/MetaDataStateFormat.java | 12 +- .../gateway/MetaStateService.java | 14 +- .../TransportNodesListGatewayMetaState.java | 2 +- ...ransportNodesListGatewayStartedShards.java | 6 +- .../elasticsearch/index/seqno/SeqNoStats.java | 2 - .../elasticsearch/index/shard/ShardPath.java | 7 +- .../BlobStoreIndexShardSnapshot.java | 9 +- .../BlobStoreIndexShardSnapshots.java | 9 +- .../TransportNodesListShardStoreMetaData.java | 4 +- .../elasticsearch/ingest/IngestMetadata.java | 27 ++-- .../ingest/PipelineConfiguration.java | 13 +- .../java/org/elasticsearch/node/Node.java | 24 +-- .../plugins/DiscoveryPlugin.java | 2 + .../plugins/RepositoryPlugin.java | 3 +- .../repositories/RepositoriesModule.java | 9 +- .../blobstore/BlobStoreFormat.java | 17 +-- .../blobstore/BlobStoreRepository.java | 25 +-- .../blobstore/ChecksumBlobStoreFormat.java | 14 +- .../repositories/fs/FsRepository.java | 6 +- .../repositories/uri/URLRepository.java | 6 +- .../elasticsearch/script/ScriptMetaData.java | 35 +++-- .../elasticsearch/snapshots/SnapshotInfo.java | 9 +- .../org/elasticsearch/tribe/TribeService.java | 29 +++- .../node/tasks/TaskManagerTestCase.java | 4 +- .../OldIndexBackwardsCompatibilityIT.java | 4 +- .../cluster/ClusterChangedEventTests.java | 38 ++--- .../cluster/ClusterStateDiffIT.java | 16 +- 
.../cluster/metadata/IndexGraveyardTests.java | 4 +- .../cluster/metadata/IndexMetaDataTests.java | 4 +- .../metadata/IndexTemplateMetaDataTests.java | 2 +- .../cluster/metadata/MetaDataTests.java | 11 +- .../ClusterSerializationTests.java | 9 +- .../cluster/serialization/DiffableTests.java | 17 ++- .../common/util/IndexFolderUpgraderTests.java | 6 +- .../discovery/DiscoveryModuleTests.java | 6 +- .../zen/PublishClusterStateActionTests.java | 8 +- .../discovery/zen/ZenDiscoveryIT.java | 7 +- .../discovery/zen/ZenDiscoveryUnitTests.java | 5 +- .../gateway/DanglingIndicesStateTests.java | 10 +- .../gateway/GatewayMetaStateTests.java | 22 +-- .../gateway/MetaDataStateFormatTests.java | 17 ++- .../gateway/MetaStateServiceTests.java | 10 +- .../index/engine/InternalEngineTests.java | 2 +- .../index/shard/IndexShardTests.java | 3 +- .../ingest/IngestMetadataTests.java | 2 +- .../script/ScriptMetaDataTests.java | 4 +- .../snapshots/BlobStoreFormatIT.java | 31 ++-- .../DedicatedClusterSnapshotRestoreIT.java | 144 +++++++++++++----- .../snapshots/mockstore/MockRepository.java | 10 +- .../java/org/elasticsearch/tribe/TribeIT.java | 51 +++++-- .../tribe/TribeServiceTests.java | 32 ++-- .../azure/classic/AzureDiscoveryPlugin.java | 4 +- .../discovery/ec2/Ec2DiscoveryPlugin.java | 4 +- .../discovery/gce/GceDiscoveryPlugin.java | 4 +- .../azure/AzureRepositoryPlugin.java | 5 +- .../repositories/azure/AzureRepository.java | 6 +- .../azure/AzureRepositorySettingsTests.java | 4 +- .../gcs/GoogleCloudStoragePlugin.java | 5 +- .../gcs/GoogleCloudStorageRepository.java | 4 +- .../repositories/hdfs/HdfsPlugin.java | 5 +- .../repositories/hdfs/HdfsRepository.java | 10 +- .../repository/s3/S3RepositoryPlugin.java | 5 +- .../repositories/s3/S3Repository.java | 6 +- .../repositories/s3/S3RepositoryTests.java | 10 +- .../UpgradeClusterClientYamlTestSuiteIT.java | 5 + .../test/mixed_cluster/10_basic.yaml | 12 ++ .../test/old_cluster/10_basic.yaml | 22 +++ .../test/upgraded_cluster/10_basic.yaml | 12 ++ .../elasticsearch/test/ESIntegTestCase.java | 22 ++- .../org/elasticsearch/test/ESTestCase.java | 4 +- .../org/elasticsearch/test/OldIndexUtils.java | 4 +- .../test/TestCustomMetaData.java | 26 ++-- .../test/discovery/TestZenDiscovery.java | 9 +- .../test/rest/ESRestTestCase.java | 13 +- .../test/transport/MockTransportService.java | 3 +- .../search/MockSearchServiceTests.java | 11 +- 115 files changed, 1090 insertions(+), 751 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java rename core/src/main/java/org/elasticsearch/{common/xcontent/FromXContentBuilder.java => cluster/NamedDiff.java} (67%) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index fbb6a8d18e8..74bf8f341f3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -59,7 +59,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - state = ClusterState.Builder.readFrom(in, null); + state = ClusterState.readFrom(in, null); readAcknowledged(in); explanations = RoutingExplanations.readFrom(in); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index f4d8b891b86..6d6f0da34b5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -52,7 +52,7 @@ public class ClusterStateResponse extends ActionResponse { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); clusterName = new ClusterName(in); - clusterState = ClusterState.Builder.readFrom(in, null); + clusterState = ClusterState.readFrom(in, null); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 85282788898..5a63ce8d869 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -57,7 +57,7 @@ public class GetAliasesResponse extends ActionResponse { int valueSize = in.readVInt(); List value = new ArrayList<>(valueSize); for (int j = 0; j < valueSize; j++) { - value.add(AliasMetaData.Builder.readFrom(in)); + value.add(new AliasMetaData(in)); } aliasesBuilder.put(key, Collections.unmodifiableList(value)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 3a29237faeb..6c2e4627523 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -104,7 +104,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder mappingEntryBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); + mappingEntryBuilder.put(in.readString(), new MappingMetaData(in)); } mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } @@ -116,7 +116,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); List aliasEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { - aliasEntryBuilder.add(AliasMetaData.Builder.readFrom(in)); + aliasEntryBuilder.add(new AliasMetaData(in)); } aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index e092f1f148d..12975c765d0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -57,7 +57,7 @@ public class GetMappingsResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder typeMapBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); + typeMapBuilder.put(in.readString(), new MappingMetaData(in)); } indexMapBuilder.put(key, typeMapBuilder.build()); } diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index a519bd8bf55..02b08b28f98 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -52,7 +52,7 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont int size = in.readVInt(); indexTemplates = new ArrayList<>(size); for (int i = 0 ; i < size ; i++) { - indexTemplates.add(0, IndexTemplateMetaData.Builder.readFrom(in)); + indexTemplates.add(0, IndexTemplateMetaData.readFrom(in)); } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java index f603a354f4b..3b66a294a50 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java @@ -52,7 +52,7 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont int size = in.readVInt(); pipelines = new ArrayList<>(size); for (int i = 0; i < size; i++) { - pipelines.add(PipelineConfiguration.readPipelineConfiguration(in)); + pipelines.add(PipelineConfiguration.readFrom(in)); } } diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 79f39d143f5..2ca67174dee 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; @@ -140,6 +141,7 @@ public abstract class TransportClient extends AbstractClient { List entries = new ArrayList<>(); entries.addAll(NetworkModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); + entries.addAll(ClusterModule.getNamedWriteables()); entries.addAll(pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.getNamedWriteables().stream()) .collect(Collectors.toList())); diff --git a/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java index f9d5f33cad6..aec3cb73039 100644 --- a/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java +++ b/core/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java @@ -40,12 +40,7 @@ public abstract class AbstractDiffable> implements Diffabl } } - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new CompleteDiff<>(this, in); - } - - public static > Diff readDiffFrom(T reader, StreamInput in) throws IOException { + public static > Diff readDiffFrom(Reader reader, StreamInput in) throws IOException { return new CompleteDiff(reader, in); } @@ -71,9 +66,9 @@ public abstract class AbstractDiffable> implements Diffabl /** * Read simple diff 
from the stream
     */
-    public CompleteDiff(Diffable<T> reader, StreamInput in) throws IOException {
+    public CompleteDiff(Reader<T> reader, StreamInput in) throws IOException {
         if (in.readBoolean()) {
-            this.part = reader.readFrom(in);
+            this.part = reader.read(in);
         } else {
             this.part = null;
         }

diff --git a/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java b/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java
new file mode 100644
index 00000000000..2a3c619ea21
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Abstract diffable object with a simple diff implementation that sends the entire object if the object changed or
+ * nothing if the object remained the same.
Comparing to AbstractDiffable, this class also works with NamedWriteables + */ +public abstract class AbstractNamedDiffable & NamedWriteable> implements Diffable, NamedWriteable { + + @Override + public Diff diff(T previousState) { + if (this.get().equals(previousState)) { + return new CompleteNamedDiff<>(previousState.getWriteableName()); + } else { + return new CompleteNamedDiff<>(get()); + } + } + + public static & NamedWriteable> NamedDiff readDiffFrom(Class tClass, String name, StreamInput in) + throws IOException { + return new CompleteNamedDiff<>(tClass, name, in); + } + + private static class CompleteNamedDiff & NamedWriteable> implements NamedDiff { + + @Nullable + private final T part; + + private final String name; + + /** + * Creates simple diff with changes + */ + public CompleteNamedDiff(T part) { + this.part = part; + this.name = part.getWriteableName(); + } + + /** + * Creates simple diff without changes + */ + public CompleteNamedDiff(String name) { + this.part = null; + this.name = name; + } + + /** + * Read simple diff from the stream + */ + public CompleteNamedDiff(Class tClass, String name, StreamInput in) throws IOException { + if (in.readBoolean()) { + this.part = in.readNamedWriteable(tClass, name); + } else { + this.part = null; + } + this.name = name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (part != null) { + out.writeBoolean(true); + part.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public T apply(T part) { + if (this.part != null) { + return this.part; + } else { + return part; + } + } + + @Override + public String getWriteableName() { + return name; + } + } + + @SuppressWarnings("unchecked") + public T get() { + return (T) this; + } + +} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 930991c443b..8fe8942662a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -22,7 +22,9 @@ package org.elasticsearch.cluster; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; @@ -30,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; +import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.routing.DelayedAllocationService; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -52,15 +55,25 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import 
org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.script.ScriptMetaData; import org.elasticsearch.tasks.TaskResultsService; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; @@ -94,6 +107,52 @@ public class ClusterModule extends AbstractModule { indexNameExpressionResolver = new IndexNameExpressionResolver(settings); } + + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + // Cluster State + registerClusterCustom(entries, SnapshotsInProgress.TYPE, SnapshotsInProgress::new, SnapshotsInProgress::readDiffFrom); + registerClusterCustom(entries, RestoreInProgress.TYPE, RestoreInProgress::new, RestoreInProgress::readDiffFrom); + registerClusterCustom(entries, SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress::new, + SnapshotDeletionsInProgress::readDiffFrom); + // Metadata + registerMetaDataCustom(entries, RepositoriesMetaData.TYPE, RepositoriesMetaData::new, RepositoriesMetaData::readDiffFrom); + registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom); + registerMetaDataCustom(entries, ScriptMetaData.TYPE, ScriptMetaData::new, ScriptMetaData::readDiffFrom); + registerMetaDataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom); + return entries; + } + + public static List getNamedXWriteables() { + List entries = new ArrayList<>(); + // Metadata + entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(RepositoriesMetaData.TYPE), + RepositoriesMetaData::fromXContent)); + entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IngestMetadata.TYPE), + IngestMetadata::fromXContent)); + entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(ScriptMetaData.TYPE), + ScriptMetaData::fromXContent)); + entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexGraveyard.TYPE), + IndexGraveyard::fromXContent)); + return entries; + } + + private static void registerClusterCustom(List entries, String name, Reader reader, + Reader diffReader) { + registerCustom(entries, ClusterState.Custom.class, name, reader, diffReader); + } + + private static void registerMetaDataCustom(List entries, String name, Reader reader, + Reader diffReader) { + registerCustom(entries, MetaData.Custom.class, name, reader, diffReader); + } + + private static void registerCustom(List entries, Class category, String name, + Reader reader, Reader diffReader) { + entries.add(new Entry(category, name, 
reader)); + entries.add(new Entry(NamedDiff.class, name, diffReader)); + } + public IndexNameExpressionResolver getIndexNameExpressionResolver() { return indexNameExpressionResolver; } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index e567e985621..a2b876eca35 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -43,8 +43,12 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -86,36 +90,10 @@ import java.util.Set; */ public class ClusterState implements ToXContent, Diffable { - public static final ClusterState PROTO = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); + public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); - public interface Custom extends Diffable, ToXContent { + public interface Custom extends Diffable, ToXContent, NamedWriteable { - String type(); - } - - private static final Map customPrototypes = new HashMap<>(); - - /** - * Register a custom index meta data factory. Make sure to call it from a static block. - */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); - } - - static { - // register non plugin custom parts - registerPrototype(SnapshotsInProgress.TYPE, SnapshotsInProgress.PROTO); - registerPrototype(RestoreInProgress.TYPE, RestoreInProgress.PROTO); - registerPrototype(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.PROTO); - } - - public static T lookupPrototype(String type) { - @SuppressWarnings("unchecked") - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins"); - } - return proto; } public static final String UNKNOWN_UUID = "_na_"; @@ -659,53 +637,39 @@ public class ClusterState implements ToXContent, Diffable { * @param data input bytes * @param localNode used to set the local node in the cluster state. */ - public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException { - return readFrom(StreamInput.wrap(data), localNode); + public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode, NamedWriteableRegistry registry) throws IOException { + StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), registry); + return readFrom(in, localNode); } - - /** - * @param in input stream - * @param localNode used to set the local node in the cluster state. can be null. 
- */ - public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - return PROTO.readFrom(in, localNode); - } } @Override - public Diff diff(ClusterState previousState) { + public Diff diff(ClusterState previousState) { return new ClusterStateDiff(previousState, this); } - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new ClusterStateDiff(in, this); + public static Diff readDiffFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + return new ClusterStateDiff(in, localNode); } - public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { ClusterName clusterName = new ClusterName(in); Builder builder = new Builder(clusterName); builder.version = in.readLong(); builder.uuid = in.readString(); - builder.metaData = MetaData.Builder.readFrom(in); - builder.routingTable = RoutingTable.Builder.readFrom(in); - builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); - builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); + builder.metaData = MetaData.readFrom(in); + builder.routingTable = RoutingTable.readFrom(in); + builder.nodes = DiscoveryNodes.readFrom(in, localNode); + builder.blocks = new ClusterBlocks(in); int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototype(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + Custom customIndexMetaData = in.readNamedWriteable(Custom.class); + builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData); } return builder.build(); } - @Override - public ClusterState readFrom(StreamInput in) throws IOException { - return readFrom(in, nodes.getLocalNode()); - } - @Override public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); @@ -727,8 +691,7 @@ public class ClusterState implements ToXContent, Diffable { if (omitSnapshotDeletions && cursor.key.equals(SnapshotDeletionsInProgress.TYPE)) { continue; } - out.writeString(cursor.key); - cursor.value.writeTo(out); + out.writeNamedWriteable(cursor.value); } } @@ -764,25 +727,26 @@ public class ClusterState implements ToXContent, Diffable { customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); } - public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { + public ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException { clusterName = new ClusterName(in); fromUuid = in.readString(); toUuid = in.readString(); toVersion = in.readLong(); - routingTable = proto.routingTable.readDiffFrom(in); - nodes = proto.nodes.readDiffFrom(in); - metaData = proto.metaData.readDiffFrom(in); - blocks = proto.blocks.readDiffFrom(in); + routingTable = RoutingTable.readDiffFrom(in); + nodes = DiscoveryNodes.readDiffFrom(in, localNode); + metaData = MetaData.readDiffFrom(in); + blocks = ClusterBlocks.readDiffFrom(in); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), new DiffableUtils.DiffableValueSerializer() { @Override public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototype(key).readFrom(in); + return in.readNamedWriteable(Custom.class, key); } + @SuppressWarnings("unchecked") @Override public Diff readDiff(StreamInput in, String key) throws IOException { - return 
lookupPrototype(key).readDiffFrom(in); + return in.readNamedWriteable(NamedDiff.class, key); } }); } diff --git a/core/src/main/java/org/elasticsearch/cluster/Diffable.java b/core/src/main/java/org/elasticsearch/cluster/Diffable.java index b039f5e9b8b..57d5ea9ed1f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/Diffable.java +++ b/core/src/main/java/org/elasticsearch/cluster/Diffable.java @@ -34,13 +34,4 @@ public interface Diffable extends Writeable { */ Diff diff(T previousState); - /** - * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput - */ - Diff readDiffFrom(StreamInput in) throws IOException; - - /** - * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged. - */ - T readFrom(StreamInput in) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index 234f22010fa..eee4a14ad19 100644 --- a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable.Reader; import java.io.IOException; import java.util.ArrayList; @@ -135,22 +136,22 @@ public final class DiffableUtils { /** * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object */ - public static > MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, T proto) throws IOException { - return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto)); + public static > MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object */ - public static > MapDiff> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, T proto) throws IOException { - return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto)); + public static > MapDiff> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object */ - public static > MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, T proto) throws IOException { - return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto)); + public static > MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + return new JdkMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** @@ -629,25 +630,27 @@ public final class DiffableUtils { } /** - * Implementation of the ValueSerializer that uses a prototype object for reading operations + * Implementation of the 
     /**
@@ -629,25 +630,27 @@ public final class DiffableUtils {
     }

     /**
-     * Implementation of the ValueSerializer that uses a prototype object for reading operations
+     * Implementation of the ValueSerializer that wraps value and diff readers.
      *
      * Note: this implementation is ignoring the key.
      */
-    public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
-        private final V proto;
+    public static class DiffableValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
+        private final Reader<V> reader;
+        private final Reader<Diff<V>> diffReader;

-        public DiffablePrototypeValueReader(V proto) {
-            this.proto = proto;
+        public DiffableValueReader(Reader<V> reader, Reader<Diff<V>> diffReader) {
+            this.reader = reader;
+            this.diffReader = diffReader;
         }

         @Override
         public V read(StreamInput in, K key) throws IOException {
-            return proto.readFrom(in);
+            return reader.read(in);
         }

         @Override
         public Diff<V> readDiff(StreamInput in, K key) throws IOException {
-            return proto.readDiffFrom(in);
+            return diffReader.read(in);
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/FromXContentBuilder.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
similarity index 67%
rename from core/src/main/java/org/elasticsearch/common/xcontent/FromXContentBuilder.java
rename to core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
index 0b0370b490e..a5dda7ba716 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/FromXContentBuilder.java
+++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
@@ -17,18 +17,13 @@
  * under the License.
  */

-package org.elasticsearch.common.xcontent;
+package org.elasticsearch.cluster;

-import org.elasticsearch.common.ParseFieldMatcher;
-
-import java.io.IOException;
+import org.elasticsearch.common.io.stream.NamedWriteable;

 /**
- * Indicates that the class supports XContent deserialization.
+ * Diff that also support NamedWriteable interface
  */
-public interface FromXContentBuilder<T> {
-    /**
-     * Parses an object with the type T from parser
-     */
-    T fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException;
+public interface NamedDiff<T extends Diffable<T>> extends Diff<T>, NamedWriteable {
+
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
index 55a09f87f75..55e70dbe644 100644
--- a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
+++ b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
@@ -39,12 +39,10 @@ import java.util.Objects;
 /**
  * Meta data about restore processes that are currently executing
  */
-public class RestoreInProgress extends AbstractDiffable<Custom> implements Custom {
+public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements Custom {

     public static final String TYPE = "restore";

-    public static final RestoreInProgress PROTO = new RestoreInProgress();
-
     private final List<Entry> entries;

     /**
@@ -377,15 +375,15 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
      * {@inheritDoc}
      */
     @Override
-    public String type() {
+    public String getWriteableName() {
         return TYPE;
     }

-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public RestoreInProgress readFrom(StreamInput in) throws IOException {
+    public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
+        return readDiffFrom(Custom.class, TYPE, in);
+    }
+
+    public RestoreInProgress(StreamInput in) throws IOException {
         Entry[] entries = new Entry[in.readVInt()];
         for (int i = 0; i < entries.length; i++) {
             Snapshot snapshot = new Snapshot(in);
             State state = State.fromValue(in.readByte());
             int indices = in.readVInt();
             List<String> indexBuilder = new ArrayList<>();
             for (int j = 0; j < indices; j++) {
                 indexBuilder.add(in.readString());
             }
             ImmutableOpenMap.Builder<ShardId, ShardRestoreStatus> builder = ImmutableOpenMap.builder();
             int shards = in.readVInt();
             for (int j = 0; j < shards; j++) {
                 ShardId shardId = ShardId.readShardId(in);
                 ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in);
                 builder.put(shardId, shardState);
             }
             entries[i] = new Entry(snapshot, state,
Collections.unmodifiableList(indexBuilder), builder.build()); } - return new RestoreInProgress(entries); + this.entries = Arrays.asList(entries); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index a0c006f9c71..de3c2ae9ed8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -36,10 +36,9 @@ import java.util.Objects; /** * A class that represents the snapshot deletions that are in progress in the cluster. */ -public class SnapshotDeletionsInProgress extends AbstractDiffable implements Custom { +public class SnapshotDeletionsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshot_deletions"; - public static final SnapshotDeletionsInProgress PROTO = new SnapshotDeletionsInProgress(Collections.emptyList()); // the version where SnapshotDeletionsInProgress was introduced public static final Version VERSION_INTRODUCED = Version.V_5_2_0_UNRELEASED; @@ -98,7 +97,7 @@ public class SnapshotDeletionsInProgress extends AbstractDiffable implem } @Override - public String type() { + public String getWriteableName() { return TYPE; } @@ -120,16 +119,15 @@ public class SnapshotDeletionsInProgress extends AbstractDiffable implem return 31 + entries.hashCode(); } - @Override - public Custom readFrom(StreamInput in) throws IOException { - return new SnapshotDeletionsInProgress(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(entries); } + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray(TYPE); diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index b5d955ecc06..0ac1e8e4090 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -44,11 +44,9 @@ import java.util.Map; /** * Meta data about snapshots that are currently executing */ -public class SnapshotsInProgress extends AbstractDiffable implements Custom { +public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { public static final String TYPE = "snapshots"; - public static final SnapshotsInProgress PROTO = new SnapshotsInProgress(); - // denotes an undefined repository state id, which will happen when receiving a cluster state with // a snapshot in progress from a pre 5.2.x node public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L; @@ -377,12 +375,15 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } @Override - public String type() { + public String getWriteableName() { return TYPE; } - @Override - public SnapshotsInProgress readFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); + } + + public SnapshotsInProgress(StreamInput in) throws IOException { Entry[] entries = new Entry[in.readVInt()]; for (int i = 0; i < entries.length; i++) { Snapshot snapshot = new Snapshot(in); @@ -416,7 +417,7 @@ public class SnapshotsInProgress extends 
AbstractDiffable implements Cus repositoryStateId, builder.build()); } - return new SnapshotsInProgress(entries); + this.entries = Arrays.asList(entries); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index 12e6ee0f7ec..2bdf560580b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.block; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -48,8 +49,6 @@ import static java.util.stream.Stream.concat; public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), ImmutableOpenMap.of()); - public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK; - private final Set global; private final ImmutableOpenMap> indicesBlocks; @@ -59,23 +58,7 @@ public class ClusterBlocks extends AbstractDiffable { ClusterBlocks(Set global, ImmutableOpenMap> indicesBlocks) { this.global = global; this.indicesBlocks = indicesBlocks; - - levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length]; - for (final ClusterBlockLevel level : ClusterBlockLevel.values()) { - Predicate containsLevel = block -> block.contains(level); - Set newGlobal = unmodifiableSet(global.stream() - .filter(containsLevel) - .collect(toSet())); - - ImmutableOpenMap.Builder> indicesBuilder = ImmutableOpenMap.builder(); - for (ObjectObjectCursor> entry : indicesBlocks) { - indicesBuilder.put(entry.key, unmodifiableSet(entry.value.stream() - .filter(containsLevel) - .collect(toSet()))); - } - - levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build()); - } + levelHolders = generateLevelHolders(global, indicesBlocks); } public Set global() { @@ -98,6 +81,27 @@ public class ClusterBlocks extends AbstractDiffable { return indices(level).getOrDefault(index, emptySet()); } + private static ImmutableLevelHolder[] generateLevelHolders(Set global, + ImmutableOpenMap> indicesBlocks) { + ImmutableLevelHolder[] levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length]; + for (final ClusterBlockLevel level : ClusterBlockLevel.values()) { + Predicate containsLevel = block -> block.contains(level); + Set newGlobal = unmodifiableSet(global.stream() + .filter(containsLevel) + .collect(toSet())); + + ImmutableOpenMap.Builder> indicesBuilder = ImmutableOpenMap.builder(); + for (ObjectObjectCursor> entry : indicesBlocks) { + indicesBuilder.put(entry.key, unmodifiableSet(entry.value.stream() + .filter(containsLevel) + .collect(toSet()))); + } + + levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build()); + } + return levelHolders; + } + /** * Returns true if one of the global blocks as its disable state persistence flag set. 
*/ @@ -239,15 +243,16 @@ public class ClusterBlocks extends AbstractDiffable { } } - @Override - public ClusterBlocks readFrom(StreamInput in) throws IOException { + public ClusterBlocks(StreamInput in) throws IOException { Set global = readBlockSet(in); int size = in.readVInt(); ImmutableOpenMap.Builder> indicesBuilder = ImmutableOpenMap.builder(size); for (int j = 0; j < size; j++) { indicesBuilder.put(in.readString().intern(), readBlockSet(in)); } - return new ClusterBlocks(global, indicesBuilder.build()); + this.global = global; + this.indicesBlocks = indicesBuilder.build(); + levelHolders = generateLevelHolders(global, indicesBlocks); } private static Set readBlockSet(StreamInput in) throws IOException { @@ -259,6 +264,10 @@ public class ClusterBlocks extends AbstractDiffable { return unmodifiableSet(blocks); } + public static Diff readDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(ClusterBlocks::new, in); + } + static class ImmutableLevelHolder { static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(emptySet(), ImmutableOpenMap.of()); @@ -383,9 +392,5 @@ public class ClusterBlocks extends AbstractDiffable { } return new ClusterBlocks(unmodifiableSet(new HashSet<>(global)), indicesBuilder.build()); } - - public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException { - return PROTO.readFrom(in); - } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 647b355cc0e..8071871fbfe 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; @@ -41,8 +42,6 @@ import static java.util.Collections.emptySet; public class AliasMetaData extends AbstractDiffable { - public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null); - private final String alias; private final CompressedXContent filter; @@ -173,22 +172,29 @@ public class AliasMetaData extends AbstractDiffable { } - @Override - public AliasMetaData readFrom(StreamInput in) throws IOException { - String alias = in.readString(); - CompressedXContent filter = null; + public AliasMetaData(StreamInput in) throws IOException { + alias = in.readString(); if (in.readBoolean()) { filter = CompressedXContent.readCompressedString(in); + } else { + filter = null; } - String indexRouting = null; if (in.readBoolean()) { indexRouting = in.readString(); + } else { + indexRouting = null; } - String searchRouting = null; if (in.readBoolean()) { searchRouting = in.readString(); + searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting)); + } else { + searchRouting = null; + searchRoutingValues = emptySet(); } - return new AliasMetaData(alias, filter, indexRouting, searchRouting); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(AliasMetaData::new, in); } public static class Builder { @@ -327,14 +333,6 @@ public class AliasMetaData extends AbstractDiffable { } return builder.build(); } - - public void 
writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { - aliasMetaData.writeTo(out); - } - - public static AliasMetaData readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); - } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index 1a2cfe90ad6..722af7f8750 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; @@ -67,7 +68,6 @@ public final class IndexGraveyard implements MetaData.Custom { 500, // the default maximum number of tombstones Setting.Property.NodeScope); - public static final IndexGraveyard PROTO = new IndexGraveyard(new ArrayList<>()); public static final String TYPE = "index-graveyard"; private static final ParseField TOMBSTONES_FIELD = new ParseField("tombstones"); private static final ObjectParser, ParseFieldMatcherSupplier> GRAVEYARD_PARSER; @@ -83,7 +83,7 @@ public final class IndexGraveyard implements MetaData.Custom { tombstones = Collections.unmodifiableList(list); } - private IndexGraveyard(final StreamInput in) throws IOException { + public IndexGraveyard(final StreamInput in) throws IOException { final int queueSize = in.readVInt(); List tombstones = new ArrayList<>(queueSize); for (int i = 0; i < queueSize; i++) { @@ -92,12 +92,8 @@ public final class IndexGraveyard implements MetaData.Custom { this.tombstones = Collections.unmodifiableList(tombstones); } - public static IndexGraveyard fromStream(final StreamInput in) throws IOException { - return new IndexGraveyard(in); - } - @Override - public String type() { + public String getWriteableName() { return TYPE; } @@ -144,7 +140,7 @@ public final class IndexGraveyard implements MetaData.Custom { return builder.endArray(); } - public IndexGraveyard fromXContent(final XContentParser parser) throws IOException { + public static IndexGraveyard fromXContent(final XContentParser parser) throws IOException { return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT)); } @@ -161,19 +157,13 @@ public final class IndexGraveyard implements MetaData.Custom { } } - @Override - public IndexGraveyard readFrom(final StreamInput in) throws IOException { - return new IndexGraveyard(in); - } - @Override @SuppressWarnings("unchecked") public Diff diff(final MetaData.Custom previous) { return new IndexGraveyardDiff((IndexGraveyard) previous, this); } - @Override - public Diff readDiffFrom(final StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(final StreamInput in) throws IOException { return new IndexGraveyardDiff(in); } @@ -273,7 +263,7 @@ public final class IndexGraveyard implements MetaData.Custom { /** * A class representing a diff of two IndexGraveyard objects. 
*/ - public static final class IndexGraveyardDiff implements Diff { + public static final class IndexGraveyardDiff implements NamedDiff { private final List added; private final int removedCount; @@ -349,6 +339,11 @@ public final class IndexGraveyard implements MetaData.Custom { public int getRemovedCount() { return removedCount; } + + @Override + public String getWriteableName() { + return TYPE; + } } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index b28ec4a0c86..8c2dc3d47ed 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.Diff; @@ -34,7 +33,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.routing.allocation.IndexMetaDataUpdater; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -46,7 +44,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; -import org.elasticsearch.common.xcontent.FromXContentBuilder; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -78,8 +75,12 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -public class IndexMetaData implements Diffable, FromXContentBuilder, ToXContent { +public class IndexMetaData implements Diffable, ToXContent { + /** + * This class will be removed in v7.0 + */ + @Deprecated public interface Custom extends Diffable, ToXContent { String type(); @@ -88,6 +89,16 @@ public class IndexMetaData implements Diffable, FromXContentBuild Custom fromXContent(XContentParser parser) throws IOException; + /** + * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput + */ + Diff readDiffFrom(StreamInput in) throws IOException; + + /** + * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged. + */ + Custom readFrom(StreamInput in) throws IOException; + /** * Merges from this to another, with this being more important, i.e., if something exists in this and another, * this will prevail. 
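Because customs and their diffs are now resolved by `getWriteableName()`, each concrete type has to be registered as a named writeable instead of a static prototype. Roughly, a registration could look like the following (sketch only; `MyCustom` is a hypothetical custom, and the list-based `NamedWriteableRegistry` constructor is assumed from the existing stream API):

[source,java]
--------------------------------------------------
// illustrative fragment, not part of the patch
List<NamedWriteableRegistry.Entry> entries = Arrays.asList(
    new NamedWriteableRegistry.Entry(MetaData.Custom.class, "my_custom", MyCustom::new),
    new NamedWriteableRegistry.Entry(NamedDiff.class, "my_custom", MyCustom::readDiffFrom));
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
--------------------------------------------------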
@@ -249,10 +260,6 @@ public class IndexMetaData implements Diffable, FromXContentBuild Setting.Property.Dynamic, Setting.Property.IndexScope); - public static final IndexMetaData PROTO = IndexMetaData.builder("") - .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1).numberOfReplicas(0).build(); - public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations"; static final String KEY_VERSION = "version"; static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards"; @@ -567,13 +574,11 @@ public class IndexMetaData implements Diffable, FromXContentBuild return new IndexMetaDataDiff(previousState, this); } - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { + public static Diff readDiffFrom(StreamInput in) throws IOException { return new IndexMetaDataDiff(in); } - @Override - public IndexMetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public static IndexMetaData fromXContent(XContentParser parser) throws IOException { return Builder.fromXContent(parser); } @@ -617,8 +622,10 @@ public class IndexMetaData implements Diffable, FromXContentBuild state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); primaryTerms = in.readVLongArray(); - mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO); - aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO); + mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData::new, + MappingMetaData::readDiffFrom); + aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new, + AliasMetaData::readDiffFrom); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), new DiffableUtils.DiffableValueSerializer() { @Override @@ -626,6 +633,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild return lookupPrototypeSafe(key).readFrom(in); } + @SuppressWarnings("unchecked") @Override public Diff readDiff(StreamInput in, String key) throws IOException { return lookupPrototypeSafe(key).readDiffFrom(in); @@ -665,8 +673,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild } } - @Override - public IndexMetaData readFrom(StreamInput in) throws IOException { + public static IndexMetaData readFrom(StreamInput in) throws IOException { Builder builder = new Builder(in.readString()); builder.version(in.readLong()); builder.setRoutingNumShards(in.readInt()); @@ -675,12 +682,12 @@ public class IndexMetaData implements Diffable, FromXContentBuild builder.primaryTerms(in.readVLongArray()); int mappingsSize = in.readVInt(); for (int i = 0; i < mappingsSize; i++) { - MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in); + MappingMetaData mappingMd = new MappingMetaData(in); builder.putMapping(mappingMd); } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + AliasMetaData aliasMd = new AliasMetaData(in); builder.putAlias(aliasMd); } int customSize = in.readVInt(); @@ -1200,10 +1207,6 @@ public class IndexMetaData implements Diffable, FromXContentBuild } return builder.build(); } - - public static IndexMetaData readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); - } } /** diff --git 
a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 7ffa4878fe7..4ba244e107d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -51,7 +52,6 @@ import java.util.Set; public class IndexTemplateMetaData extends AbstractDiffable { - public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(IndexTemplateMetaData.class)); private final String name; @@ -206,8 +206,7 @@ public class IndexTemplateMetaData extends AbstractDiffable readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(IndexTemplateMetaData::readFrom, in); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); @@ -525,10 +528,6 @@ public class IndexTemplateMetaData extends AbstractDiffable { - public static final MappingMetaData PROTO = new MappingMetaData(); - public static class Routing { public static final Routing EMPTY = new Routing(false); @@ -228,11 +227,11 @@ public class MappingMetaData extends AbstractDiffable { return result; } - public MappingMetaData readFrom(StreamInput in) throws IOException { - String type = in.readString(); - CompressedXContent source = CompressedXContent.readCompressedString(in); + public MappingMetaData(StreamInput in) throws IOException { + type = in.readString(); + source = CompressedXContent.readCompressedString(in); // routing - Routing routing = new Routing(in.readBoolean()); + routing = new Routing(in.readBoolean()); if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // timestamp boolean enabled = in.readBoolean(); @@ -243,9 +242,11 @@ public class MappingMetaData extends AbstractDiffable { in.readOptionalString(); // defaultTimestamp in.readOptionalBoolean(); // ignoreMissing } + hasParentField = in.readBoolean(); + } - final boolean hasParentField = in.readBoolean(); - return new MappingMetaData(type, source, routing, hasParentField); + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(MappingMetaData::new, in); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index f091159aaec..d00be01ed58 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -27,37 +27,32 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; -import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; -import org.elasticsearch.common.xcontent.FromXContentBuilder; +import org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.script.ScriptMetaData; import java.io.IOException; import java.util.ArrayList; @@ -69,18 +64,15 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; -import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.util.set.Sets.newHashSet; -public class MetaData implements Iterable, Diffable, FromXContentBuilder, ToXContent { +public class MetaData implements Iterable, Diffable, ToXContent { - public static final MetaData PROTO = builder().build(); + private static final Logger logger = Loggers.getLogger(MetaData.class); public static final String ALL = "_all"; @@ -118,48 +110,11 @@ public class MetaData implements Iterable, Diffable, Fr */ public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); - public interface Custom extends Diffable, ToXContent { - - String type(); - - Custom fromXContent(XContentParser parser) throws IOException; + public interface Custom extends Diffable, ToXContent, NamedWriteable { EnumSet context(); } - public static Map customPrototypes = new HashMap<>(); - - static { - // register non plugin custom metadata - registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO); - registerPrototype(IngestMetadata.TYPE, IngestMetadata.PROTO); - registerPrototype(ScriptMetaData.TYPE, ScriptMetaData.PROTO); - registerPrototype(IndexGraveyard.TYPE, IndexGraveyard.PROTO); - } - - /** - * Register a custom index meta data factory. Make sure to call it from a static block. 
- */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); - } - - @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); - } - - public static T lookupPrototypeSafe(String type) { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins"); - } - return proto; - } - - public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); @@ -596,14 +551,14 @@ public class MetaData implements Iterable, Diffable, Fr // Check if any persistent metadata needs to be saved int customCount1 = 0; for (ObjectObjectCursor cursor : metaData1.customs) { - if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (cursor.value.context().contains(XContentContext.GATEWAY)) { if (!cursor.value.equals(metaData2.custom(cursor.key))) return false; customCount1++; } } int customCount2 = 0; - for (ObjectObjectCursor cursor : metaData2.customs) { - if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + for (ObjectCursor cursor : metaData2.customs.values()) { + if (cursor.value.context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -616,13 +571,11 @@ public class MetaData implements Iterable, Diffable, Fr return new MetaDataDiff(previousState, this); } - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { + public static Diff readDiffFrom(StreamInput in) throws IOException { return new MetaDataDiff(in); } - @Override - public MetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public static MetaData fromXContent(XContentParser parser) throws IOException { return Builder.fromXContent(parser); } @@ -659,18 +612,21 @@ public class MetaData implements Iterable, Diffable, Fr version = in.readLong(); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO); - templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO); + indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData::readFrom, + IndexMetaData::readDiffFrom); + templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData::readFrom, + IndexTemplateMetaData::readDiffFrom); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), new DiffableUtils.DiffableValueSerializer() { @Override public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); + return in.readNamedWriteable(Custom.class, key); } + @SuppressWarnings("unchecked") @Override public Diff readDiff(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); + return in.readNamedWriteable(NamedDiff.class, key); } }); } @@ -700,8 +656,7 @@ public class MetaData implements Iterable, Diffable, Fr } } - @Override - public MetaData readFrom(StreamInput in) throws IOException { + public static MetaData readFrom(StreamInput in) 
throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); builder.clusterUUID = in.readString(); @@ -709,17 +664,16 @@ public class MetaData implements Iterable, Diffable, Fr builder.persistentSettings(readSettingsFromStream(in)); int size = in.readVInt(); for (int i = 0; i < size; i++) { - builder.put(IndexMetaData.Builder.readFrom(in), false); + builder.put(IndexMetaData.readFrom(in), false); } size = in.readVInt(); for (int i = 0; i < size; i++) { - builder.put(IndexTemplateMetaData.Builder.readFrom(in)); + builder.put(IndexTemplateMetaData.readFrom(in)); } int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + Custom customIndexMetaData = in.readNamedWriteable(Custom.class); + builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData); } return builder.build(); } @@ -740,8 +694,7 @@ public class MetaData implements Iterable, Diffable, Fr } out.writeVInt(customs.size()); for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + out.writeNamedWriteable(cursor.value); } } @@ -1051,8 +1004,7 @@ public class MetaData implements Iterable, Diffable, Fr } for (ObjectObjectCursor cursor : metaData.customs()) { - Custom proto = lookupPrototypeSafe(cursor.key); - if (proto.context().contains(context)) { + if (cursor.value.context().contains(context)) { builder.startObject(cursor.key); cursor.value.toXContent(builder, params); builder.endObject(); @@ -1103,14 +1055,12 @@ public class MetaData implements Iterable, Diffable, Fr builder.put(IndexTemplateMetaData.Builder.fromXContent(parser, parser.currentName())); } } else { - // check if its a custom index metadata - Custom proto = lookupPrototype(currentFieldName); - if (proto == null) { - //TODO warn + try { + Custom custom = parser.namedObject(Custom.class, currentFieldName, null); + builder.putCustom(custom.getWriteableName(), custom); + } catch (UnknownNamedObjectException ex) { + logger.warn("Skipping unknown custom object with type {}", currentFieldName); parser.skipChildren(); - } else { - Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); } } } else if (token.isValue()) { @@ -1127,10 +1077,6 @@ public class MetaData implements Iterable, Diffable, Fr } return builder.build(); } - - public static MetaData readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); - } } private static final ToXContent.Params FORMAT_PARAMS; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 2dc842ceaae..67909bff614 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -21,6 +21,9 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,12 +42,10 @@ import java.util.List; /** * 
Contains metadata about registered snapshot repositories */ -public class RepositoriesMetaData extends AbstractDiffable implements MetaData.Custom { +public class RepositoriesMetaData extends AbstractNamedDiffable implements Custom { public static final String TYPE = "repositories"; - public static final RepositoriesMetaData PROTO = new RepositoriesMetaData(); - private final List repositories; /** @@ -100,20 +101,20 @@ public class RepositoriesMetaData extends AbstractDiffable implements Me * {@inheritDoc} */ @Override - public String type() { + public String getWriteableName() { return TYPE; } - /** - * {@inheritDoc} - */ - @Override - public Custom readFrom(StreamInput in) throws IOException { + public RepositoriesMetaData(StreamInput in) throws IOException { RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; for (int i = 0; i < repository.length; i++) { - repository[i] = RepositoryMetaData.readFrom(in); + repository[i] = new RepositoryMetaData(in); } - return new RepositoriesMetaData(repository); + this.repositories = Arrays.asList(repository); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); } /** @@ -127,11 +128,7 @@ public class RepositoriesMetaData extends AbstractDiffable implements Me } } - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { + public static RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { XContentParser.Token token; List repository = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java index 3c13a10c1cf..847db915b8b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java @@ -73,17 +73,10 @@ public class RepositoryMetaData { } - /** - * Reads repository metadata from stream input - * - * @param in stream input - * @return repository metadata - */ - public static RepositoryMetaData readFrom(StreamInput in) throws IOException { - String name = in.readString(); - String type = in.readString(); - Settings settings = Settings.readSettingsFromStream(in); - return new RepositoryMetaData(name, type, settings); + public RepositoryMetaData(StreamInput in) throws IOException { + name = in.readString(); + type = in.readString(); + settings = Settings.readSettingsFromStream(in); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 6d80a9573ba..4829c9c9753 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -46,7 +47,6 @@ import java.util.Map; public class DiscoveryNodes extends AbstractDiffable implements Iterable { public static final 
DiscoveryNodes EMPTY_NODES = builder().build(); - public static final DiscoveryNodes PROTO = EMPTY_NODES; private final ImmutableOpenMap nodes; private final ImmutableOpenMap dataNodes; @@ -523,7 +523,7 @@ public class DiscoveryNodes extends AbstractDiffable implements } } - private DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { Builder builder = new Builder(); if (in.readBoolean()) { builder.masterNodeId(in.readString()); @@ -546,9 +546,8 @@ public class DiscoveryNodes extends AbstractDiffable implements return builder.build(); } - @Override - public DiscoveryNodes readFrom(StreamInput in) throws IOException { - return readFrom(in, getLocalNode()); + public static Diff readDiffFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + return AbstractDiffable.readDiffFrom(in1 -> readFrom(in1, localNode), in); } public static Builder builder() { @@ -678,10 +677,6 @@ public class DiscoveryNodes extends AbstractDiffable implements ); } - public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - return PROTO.readFrom(in, localNode); - } - public boolean isLocalNodeElectedMaster() { return masterNodeId != null && masterNodeId.equals(localNodeId); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 58ee6d70f2c..1d137d30dba 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -25,6 +25,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource; @@ -63,8 +64,6 @@ import java.util.Set; */ public class IndexRoutingTable extends AbstractDiffable implements Iterable { - public static final IndexRoutingTable PROTO = builder(new Index("", "_na_")).build(); - private final Index index; private final ShardShuffler shuffler; @@ -319,8 +318,7 @@ public class IndexRoutingTable extends AbstractDiffable imple return result; } - @Override - public IndexRoutingTable readFrom(StreamInput in) throws IOException { + public static IndexRoutingTable readFrom(StreamInput in) throws IOException { Index index = new Index(in); Builder builder = new Builder(index); @@ -332,6 +330,10 @@ public class IndexRoutingTable extends AbstractDiffable imple return builder.build(); } + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(IndexRoutingTable::readFrom, in); + } + @Override public void writeTo(StreamOutput out) throws IOException { index.writeTo(out); @@ -354,17 +356,6 @@ public class IndexRoutingTable extends AbstractDiffable imple this.index = index; } - /** - * Reads an {@link IndexRoutingTable} from an {@link StreamInput} - * - * @param in {@link StreamInput} to read the {@link IndexRoutingTable} from - * @return {@link IndexRoutingTable} read - * @throws IOException if something happens during read - */ - public static IndexRoutingTable readFrom(StreamInput in) throws IOException { - 
return PROTO.readFrom(in);
-        }
-
         /**
          * Initializes a new empty index, as if it was created from an API.
          */
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 051fd12a12b..d4aa89b99c5 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -56,8 +56,6 @@ import java.util.function.Predicate;
  */
 public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {

-    public static RoutingTable PROTO = builder().build();
-
     public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build();

     private final long version;
@@ -349,18 +347,16 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routin
-    @Override
-    public Diff<RoutingTable> readDiffFrom(StreamInput in) throws IOException {
+    public static Diff<RoutingTable> readDiffFrom(StreamInput in) throws IOException {
         return new RoutingTableDiff(in);
     }

-    @Override
-    public RoutingTable readFrom(StreamInput in) throws IOException {
+    public static RoutingTable readFrom(StreamInput in) throws IOException {
         Builder builder = new Builder();
         builder.version = in.readLong();
         int size = in.readVInt();
         for (int i = 0; i < size; i++) {
-            IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
+            IndexRoutingTable index = IndexRoutingTable.readFrom(in);
             builder.add(index);
         }

@@ -389,7 +385,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routin
         public RoutingTableDiff(StreamInput in) throws IOException {
             version = in.readLong();
-            indicesRouting = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
+            indicesRouting = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable::readFrom,
+                IndexRoutingTable::readDiffFrom);
         }
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java
--- a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java
@@ ... @@ public class NamedWriteableAwareStreamInput extends FilterStreamInput {
     @Override
     public <C extends NamedWriteable> C readNamedWriteable(Class<C> categoryClass) throws IOException {
         String name = readString();
+        return readNamedWriteable(categoryClass, name);
+    }
+
+    @Override
+    public <C extends NamedWriteable> C readNamedWriteable(@SuppressWarnings("unused") Class<C> categoryClass,
+                                                           @SuppressWarnings("unused") String name) throws IOException {
         Writeable.Reader<? extends C> reader = namedWriteableRegistry.getReader(categoryClass, name);
         C c = reader.read(this);
         if (c == null) {
             throw new IOException(
-                "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream.");
+                    "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream.");
         }
         assert name.equals(c.getWriteableName()) : c + " claims to have a different name [" + c.getWriteableName()
-            + "] than it was read from [" + name + "].";
+                + "] than it was read from [" + name + "].";
         return c;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index f20c372ed10..e33c3ed840a 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -825,6 +825,22 @@ public abstract class StreamInput extends InputStream {
         throw new UnsupportedOperationException("can't read named writeable from StreamInput");
     }

+    /**
+     * Reads a {@link NamedWriteable} from the current stream with the given name. It is assumed that the caller obtained the name
+     * from other source, so it's not read from the stream. The name is used for looking for
+     * the corresponding entry in the registry by name, so that the proper object can be read and returned.
+     * Default implementation throws {@link UnsupportedOperationException} as StreamInput doesn't hold a registry.
+     * Use {@link FilterInputStream} instead which wraps a stream and supports a {@link NamedWriteableRegistry} too.
+ * + * Prefer {@link StreamInput#readNamedWriteable(Class)} and {@link StreamOutput#writeNamedWriteable(NamedWriteable)} unless you + * have a compelling reason to use this method instead. + */ + @Nullable + public C readNamedWriteable(@SuppressWarnings("unused") Class categoryClass, + @SuppressWarnings("unused") String name) throws IOException { + throw new UnsupportedOperationException("can't read named writeable from StreamInput"); + } + /** * Reads an optional {@link NamedWriteable}. */ diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java index 2712aef8233..528982385ac 100644 --- a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java +++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -26,6 +26,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -86,7 +87,7 @@ public class IndexFolderUpgrader { void upgrade(final String indexFolderName) throws IOException { for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) { final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolderPath); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, indexFolderPath); if (indexMetaData != null) { final Index index = indexMetaData.getIndex(); if (needsUpgrade(index, indexFolderName)) { diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 98ce54428c7..ea3ae0c919b 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -30,6 +30,7 @@ import java.util.function.Supplier; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -54,8 +55,9 @@ public class DiscoveryModule { private final Discovery discovery; - public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NetworkService networkService, - ClusterService clusterService, List plugins) { + public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, ClusterService clusterService, + List plugins) { final UnicastHostsProvider hostsProvider; Map> hostProviders = new HashMap<>(); @@ -78,10 +80,12 @@ public class DiscoveryModule { } Map> discoveryTypes = new HashMap<>(); - discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider)); + discoveryTypes.put("zen", + () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider)); 
discoveryTypes.put("none", () -> new NoneDiscovery(settings, clusterService, clusterService.getClusterSettings())); for (DiscoveryPlugin plugin : plugins) { - plugin.getDiscoveryTypes(threadPool, transportService, clusterService, hostsProvider).entrySet().forEach(entry -> { + plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, + clusterService, hostsProvider).entrySet().forEach(entry -> { if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) { throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice"); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index c8527168198..c65542093d3 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -170,7 +170,7 @@ public class MembershipAction extends AbstractComponent { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - this.state = ClusterState.Builder.readFrom(in, localNode.get()); + this.state = ClusterState.readFrom(in, localNode.get()); } @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 9f0d3576c4b..11ef5b9ee14 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -34,6 +34,8 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -79,6 +81,7 @@ public class PublishClusterStateAction extends AbstractComponent { } private final TransportService transportService; + private final NamedWriteableRegistry namedWriteableRegistry; private final Supplier clusterStateSupplier; private final NewPendingClusterStateListener newPendingClusterStatelistener; private final DiscoverySettings discoverySettings; @@ -88,12 +91,14 @@ public class PublishClusterStateAction extends AbstractComponent { public PublishClusterStateAction( Settings settings, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, Supplier clusterStateSupplier, NewPendingClusterStateListener listener, DiscoverySettings discoverySettings, ClusterName clusterName) { super(settings); this.transportService = transportService; + this.namedWriteableRegistry = namedWriteableRegistry; this.clusterStateSupplier = clusterStateSupplier; this.newPendingClusterStatelistener = listener; this.discoverySettings = discoverySettings; @@ -376,15 +381,16 @@ public class PublishClusterStateAction extends AbstractComponent { } else { in = request.bytes().streamInput(); } + in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); in.setVersion(request.version()); synchronized (lastSeenClusterStateMutex) { final ClusterState incomingState; // If true we received full cluster state - 
otherwise diffs if (in.readBoolean()) { - incomingState = ClusterState.Builder.readFrom(in, clusterStateSupplier.get().nodes().getLocalNode()); + incomingState = ClusterState.readFrom(in, clusterStateSupplier.get().nodes().getLocalNode()); logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), request.bytes().length()); } else if (lastSeenClusterState != null) { - Diff diff = lastSeenClusterState.readDiffFrom(in); + Diff diff = ClusterState.readDiffFrom(in, lastSeenClusterState.nodes().getLocalNode()); incomingState = diff.apply(lastSeenClusterState); logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", incomingState.version(), incomingState.stateUUID(), request.bytes().length()); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index fdadb775ad6..ce7dba98797 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasables; @@ -102,6 +103,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin"; private final TransportService transportService; + private final NamedWriteableRegistry namedWriteableRegistry; private final ClusterService clusterService; private AllocationService allocationService; private final ClusterName clusterName; @@ -138,11 +140,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, UnicastHostsProvider hostsProvider) { super(settings); this.clusterService = clusterService; this.clusterName = clusterService.getClusterName(); this.transportService = transportService; + this.namedWriteableRegistry = namedWriteableRegistry; this.discoverySettings = new DiscoverySettings(settings, clusterService.getClusterSettings()); this.zenPing = newZenPing(settings, threadPool, transportService, hostsProvider); this.electMaster = new ElectMasterService(settings); @@ -179,6 +183,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover new PublishClusterStateAction( settings, transportService, + namedWriteableRegistry, clusterService::state, new NewPendingClusterStateListener(), discoverySettings, diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 401f3f12f4b..f1cdb5ae575 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -372,7 +373,7 @@ public final class NodeEnvironment implements Closeable { private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger, NodePath... nodePaths) throws IOException { final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); - NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, paths); + NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); if (metaData == null) { metaData = new NodeMetaData(generateNodeId(settings)); } diff --git a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index b1891191500..5f75771e9e6 100644 --- a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -223,7 +223,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { fromNode = new DiscoveryNode(in); indices = new IndexMetaData[in.readVInt()]; for (int i = 0; i < indices.length; i++) { - indices[i] = IndexMetaData.Builder.readFrom(in); + indices[i] = IndexMetaData.readFrom(in); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index a4509315f16..b6fd1842b80 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -182,7 +182,7 @@ public abstract class MetaDataStateFormat { * Reads the state from a given file and compares the expected version against the actual version of * the state. */ - public final T read(Path file) throws IOException { + public final T read(NamedXContentRegistry namedXContentRegistry, Path file) throws IOException { try (Directory dir = newDirectory(file.getParent())) { try (final IndexInput indexInput = dir.openInput(file.getFileName().toString(), IOContext.DEFAULT)) { // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here. @@ -197,8 +197,7 @@ public abstract class MetaDataStateFormat { long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) { - // It is safe to use EMPTY here because this never uses namedObject - try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(NamedXContentRegistry.EMPTY, + try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(namedXContentRegistry, new InputStreamIndexInput(slice, contentSize))) { return fromXContent(parser); } @@ -262,7 +261,7 @@ public abstract class MetaDataStateFormat { * @param dataLocations the data-locations to try. * @return the latest state or null if no state was found. */ - public T loadLatestState(Logger logger, Path... dataLocations) throws IOException { + public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegistry, Path... 
dataLocations) throws IOException { List files = new ArrayList<>(); long maxStateId = -1; boolean maxStateIdIsLegacy = true; @@ -313,15 +312,14 @@ public abstract class MetaDataStateFormat { logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath()); continue; } - // EMPTY is safe here because no parser uses namedObject - try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, new BytesArray(data))) { + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, new BytesArray(data))) { state = fromXContent(parser); } if (state == null) { logger.debug("{}: no data for [{}], ignoring...", prefix, stateFile.toAbsolutePath()); } } else { - state = read(stateFile); + state = read(namedXContentRegistry, stateFile); logger.trace("state id [{}] read from [{}]", id, stateFile.getFileName()); } return state; diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 1673a653a6b..b900305ab55 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -40,10 +41,12 @@ import java.util.function.Predicate; public class MetaStateService extends AbstractComponent { private final NodeEnvironment nodeEnv; + private final NamedXContentRegistry namedXContentRegistry; - public MetaStateService(Settings settings, NodeEnvironment nodeEnv) { + public MetaStateService(Settings settings, NodeEnvironment nodeEnv, NamedXContentRegistry namedXContentRegistry) { super(settings); this.nodeEnv = nodeEnv; + this.namedXContentRegistry = namedXContentRegistry; } /** @@ -59,7 +62,8 @@ public class MetaStateService extends AbstractComponent { metaDataBuilder = MetaData.builder(); } for (String indexFolderName : nodeEnv.availableIndexFolders()) { - IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.resolveIndexFolder(indexFolderName)); + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, + nodeEnv.resolveIndexFolder(indexFolderName)); if (indexMetaData != null) { metaDataBuilder.put(indexMetaData, false); } else { @@ -74,7 +78,7 @@ public class MetaStateService extends AbstractComponent { */ @Nullable public IndexMetaData loadIndexState(Index index) throws IOException { - return IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.indexPaths(index)); + return IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.indexPaths(index)); } /** @@ -86,7 +90,7 @@ public class MetaStateService extends AbstractComponent { if (excludeIndexPathIdsPredicate.test(indexFolderName)) { continue; } - IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.resolveIndexFolder(indexFolderName)); if (indexMetaData != null) { final String indexPathId = indexMetaData.getIndex().getUUID(); @@ -106,7 +110,7 @@ public class MetaStateService extends AbstractComponent { * Loads the global state, 
*without* index state, see {@link #loadFullState()} for that. */ MetaData loadGlobalState() throws IOException { - return MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths()); + return MetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); } /** diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 3e6769200e9..13c317c53e9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -185,7 +185,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction { - - public static final BlobStoreIndexShardSnapshot PROTO = new BlobStoreIndexShardSnapshot(); +public class BlobStoreIndexShardSnapshot implements ToXContent { /** * Information about snapshotted file @@ -478,6 +475,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files"); private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size"); private static final ParseField PARSE_FILES = new ParseField("files"); + private static final ParseFieldMatcher parseFieldMatcher = ParseFieldMatcher.EMPTY; /** * Serializes shard snapshot metadata info into JSON @@ -507,8 +505,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil * @param parser parser * @return shard snapshot metadata */ - public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { - + public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) throws IOException { String snapshot = null; long indexVersion = -1; long startTime = 0; diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 5b66d9b6f6f..526edd27560 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.snapshots.blobstore; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.xcontent.FromXContentBuilder; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -44,9 +43,7 @@ import static java.util.Collections.unmodifiableMap; * This class is used to find files that were already snapshotted and clear out files that no longer referenced by any * snapshots */ -public class BlobStoreIndexShardSnapshots implements Iterable, ToXContent, FromXContentBuilder { - - public static final BlobStoreIndexShardSnapshots PROTO = new BlobStoreIndexShardSnapshots(); +public class BlobStoreIndexShardSnapshots implements Iterable, ToXContent { private final List shardSnapshots; private final Map files; @@ -156,6 +153,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To static final ParseField FILES = new ParseField("files"); static final ParseField 
SNAPSHOTS = new ParseField("snapshots"); } + private static final ParseFieldMatcher parseFieldMatcher = ParseFieldMatcher.EMPTY; /** * Writes index file for the shard in the following format. @@ -232,8 +230,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To return builder; } - @Override - public BlobStoreIndexShardSnapshots fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); if (token == null) { // New parser token = parser.nextToken(); diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 707df5feb1e..84d3354ba4e 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.index.IndexService; @@ -130,7 +131,8 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction, ParseFieldMatcherSupplier> INGEST_METADATA_PARSER = new ObjectParser<>( "ingest_metadata", ArrayList::new); @@ -67,7 +67,7 @@ public final class IngestMetadata implements MetaData.Custom { } @Override - public String type() { + public String getWriteableName() { return TYPE; } @@ -75,15 +75,14 @@ public final class IngestMetadata implements MetaData.Custom { return pipelines; } - @Override - public IngestMetadata readFrom(StreamInput in) throws IOException { + public IngestMetadata(StreamInput in) throws IOException { int size = in.readVInt(); Map pipelines = new HashMap<>(size); for (int i = 0; i < size; i++) { - PipelineConfiguration pipeline = PipelineConfiguration.readPipelineConfiguration(in); + PipelineConfiguration pipeline = PipelineConfiguration.readFrom(in); pipelines.put(pipeline.getId(), pipeline); } - return new IngestMetadata(pipelines); + this.pipelines = Collections.unmodifiableMap(pipelines); } @Override @@ -94,8 +93,7 @@ public final class IngestMetadata implements MetaData.Custom { } } - @Override - public IngestMetadata fromXContent(XContentParser parser) throws IOException { + public static IngestMetadata fromXContent(XContentParser parser) throws IOException { Map pipelines = new HashMap<>(); List configs = INGEST_METADATA_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT); for (PipelineConfiguration pipeline : configs) { @@ -124,12 +122,11 @@ public final class IngestMetadata implements MetaData.Custom { return new IngestMetadataDiff((IngestMetadata) before, this); } - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new IngestMetadataDiff(in); } - static class IngestMetadataDiff implements Diff { + static class IngestMetadataDiff implements NamedDiff { final Diff> pipelines; @@ -138,7 +135,8 @@ public final class IngestMetadata implements MetaData.Custom { } 
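
In place of the old static prototype registration, each custom now contributes reader entries keyed by its `TYPE`: one for the custom itself and one for its diff. A minimal sketch of the wiring that `ClusterModule.getNamedWriteables()` provides for this class — illustrative of the pattern, not the literal `ClusterModule` source:

[source,java]
--------------------------------------------------
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
// The custom itself: resolved on the wire by category class plus name,
// using the StreamInput constructor added above.
entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class,
        IngestMetadata.TYPE, IngestMetadata::new));
// Its diff: dispatched to the static readDiffFrom added above.
entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class,
        IngestMetadata.TYPE, IngestMetadata::readDiffFrom));
--------------------------------------------------
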
public IngestMetadataDiff(StreamInput in) throws IOException { - pipelines = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), PipelineConfiguration.PROTOTYPE); + pipelines = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), PipelineConfiguration::readFrom, + PipelineConfiguration::readDiffFrom); } @Override @@ -150,6 +148,11 @@ public final class IngestMetadata implements MetaData.Custom { public void writeTo(StreamOutput out) throws IOException { pipelines.writeTo(out); } + + @Override + public String getWriteableName() { + return TYPE; + } } @Override diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 9e15e445f12..c983c205f8d 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.bytes.BytesReference; @@ -39,11 +40,6 @@ import java.util.Map; */ public final class PipelineConfiguration extends AbstractDiffable implements ToXContent { - static final PipelineConfiguration PROTOTYPE = new PipelineConfiguration(null, null); - - public static PipelineConfiguration readPipelineConfiguration(StreamInput in) throws IOException { - return PROTOTYPE.readFrom(in); - } private static final ObjectParser PARSER = new ObjectParser<>("pipeline_config", Builder::new); static { PARSER.declareString(Builder::setId, new ParseField("id")); @@ -103,11 +99,14 @@ public final class PipelineConfiguration extends AbstractDiffable readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(PipelineConfiguration::readFrom, in); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 49115d8b3c4..5bd96464f56 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -325,9 +325,6 @@ public class Node implements Closeable { final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); clusterService.addListener(scriptModule.getScriptService()); resourcesToClose.add(clusterService); - final TribeService tribeService = new TribeService(settings, clusterService, nodeId, - s -> newTribeClientNode(s, classpathPlugins)); - resourcesToClose.add(tribeService); final IngestService ingestService = new IngestService(settings, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); @@ -349,7 +346,6 @@ public class Node implements Closeable { settingsModule.getClusterSettings(), threadPool, pluginsService.filterPlugins(ActionPlugin.class)); modules.add(actionModule); modules.add(new GatewayModule()); - modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class))); CircuitBreakerService circuitBreakerService = 
createCircuitBreakerService(settingsModule.getSettings(), settingsModule.getClusterSettings()); resourcesToClose.add(circuitBreakerService); @@ -361,15 +357,21 @@ public class Node implements Closeable { indicesModule.getNamedWriteables().stream(), searchModule.getNamedWriteables().stream(), pluginsService.filterPlugins(Plugin.class).stream() - .flatMap(p -> p.getNamedWriteables().stream())) + .flatMap(p -> p.getNamedWriteables().stream()), + ClusterModule.getNamedWriteables().stream()) .flatMap(Function.identity()).collect(Collectors.toList()); final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of( - searchModule.getNamedXContents().stream(), - pluginsService.filterPlugins(Plugin.class).stream() - .flatMap(p -> p.getNamedXContent().stream()) - ).flatMap(Function.identity()).collect(toList())); - final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment); + searchModule.getNamedXContents().stream(), + pluginsService.filterPlugins(Plugin.class).stream() + .flatMap(p -> p.getNamedXContent().stream()), + ClusterModule.getNamedXWriteables().stream()) + .flatMap(Function.identity()).collect(toList())); + final TribeService tribeService = new TribeService(settings, clusterService, nodeId, namedWriteableRegistry, + s -> newTribeClientNode(s, classpathPlugins)); + resourcesToClose.add(tribeService); + modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), xContentRegistry)); + final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment, xContentRegistry); final IndicesService indicesService = new IndicesService(settings, pluginsService, nodeEnvironment, xContentRegistry, settingsModule.getClusterSettings(), analysisModule.getAnalysisRegistry(), clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry, @@ -407,7 +409,7 @@ public class Node implements Closeable { } final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, - networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class)); + namedWriteableRegistry, networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class)); modules.add(b -> { b.bind(SearchRequestParsers.class).toInstance(searchModule.getSearchRequestParsers()); b.bind(SearchExtRegistry.class).toInstance(searchModule.getSearchExtRegistry()); diff --git a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index 37b97855084..61e87d83a18 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.function.Supplier; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; @@ -60,6 +61,7 @@ public interface DiscoveryPlugin { * @param hostsProvider Use to find configured hosts which should be pinged for initial discovery */ default Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, ClusterService 
clusterService, UnicastHostsProvider hostsProvider) { return Collections.emptyMap(); } diff --git a/core/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java index 9306ee37076..a3af52a9a4a 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java @@ -22,6 +22,7 @@ package org.elasticsearch.plugins; import java.util.Collections; import java.util.Map; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.Repository; @@ -38,7 +39,7 @@ public interface RepositoryPlugin { * The key of the returned {@link Map} is the type name of the repository and * the value is a factory to construct the {@link Repository} interface. */ - default Map getRepositories(Environment env) { + default Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.emptyMap(); } } diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/core/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index 50ab90b4fe1..d03e2c1ac34 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.TransportNodesSna import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.binder.LinkedBindingBuilder; import org.elasticsearch.common.inject.multibindings.MapBinder; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.fs.FsRepository; @@ -43,13 +44,13 @@ public class RepositoriesModule extends AbstractModule { private final Map repositoryTypes; - public RepositoriesModule(Environment env, List repoPlugins) { + public RepositoriesModule(Environment env, List repoPlugins, NamedXContentRegistry namedXContentRegistry) { Map factories = new HashMap<>(); - factories.put(FsRepository.TYPE, (metadata) -> new FsRepository(metadata, env)); - factories.put(URLRepository.TYPE, (metadata) -> new URLRepository(metadata, env)); + factories.put(FsRepository.TYPE, (metadata) -> new FsRepository(metadata, env, namedXContentRegistry)); + factories.put(URLRepository.TYPE, (metadata) -> new URLRepository(metadata, env, namedXContentRegistry)); for (RepositoryPlugin repoPlugin : repoPlugins) { - Map newRepoTypes = repoPlugin.getRepositories(env); + Map newRepoTypes = repoPlugin.getRepositories(env, namedXContentRegistry); for (Map.Entry entry : newRepoTypes.entrySet()) { if (factories.put(entry.getKey(), entry.getValue()) != null) { throw new IllegalArgumentException("Repository type [" + entry.getKey() + "] is already registered"); diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java index aadf871c09f..9a4f6395484 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java @@ -19,11 +19,10 @@ package org.elasticsearch.repositories.blobstore; import org.elasticsearch.cluster.metadata.MetaData; -import 
org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.FromXContentBuilder; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.NamedXContentRegistry.FromXContent; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -41,9 +40,9 @@ public abstract class BlobStoreFormat { protected final String blobNameFormat; - protected final FromXContentBuilder reader; + protected final FromXContent reader; - protected final ParseFieldMatcher parseFieldMatcher; + protected final NamedXContentRegistry namedXContentRegistry; // Serialization parameters to specify correct context for metadata serialization protected static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; @@ -61,12 +60,11 @@ public abstract class BlobStoreFormat { /** * @param blobNameFormat format of the blobname in {@link String#format(Locale, String, Object...)} format * @param reader the prototype object that can deserialize objects with type T - * @param parseFieldMatcher parse field matcher */ - protected BlobStoreFormat(String blobNameFormat, FromXContentBuilder reader, ParseFieldMatcher parseFieldMatcher) { + protected BlobStoreFormat(String blobNameFormat, FromXContent reader, NamedXContentRegistry namedXContentRegistry) { this.reader = reader; this.blobNameFormat = blobNameFormat; - this.parseFieldMatcher = parseFieldMatcher; + this.namedXContentRegistry = namedXContentRegistry; } /** @@ -110,9 +108,8 @@ public abstract class BlobStoreFormat { } protected T read(BytesReference bytes) throws IOException { - // EMPTY is safe here because no reader calls namedObject - try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, bytes)) { - T obj = reader.fromXContent(parser, parseFieldMatcher); + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, bytes)) { + T obj = reader.fromXContent(parser); return obj; } } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 72855bc7f30..6935d277eed 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -45,7 +45,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Numbers; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -168,6 +167,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final RepositoryMetaData metadata; + protected final NamedXContentRegistry namedXContentRegistry; + private static final int BUFFER_SIZE = 4096; private static final String SNAPSHOT_PREFIX = "snap-"; @@ -214,8 +215,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final boolean readOnly; - private final ParseFieldMatcher parseFieldMatcher; - private final ChecksumBlobStoreFormat indexShardSnapshotFormat; 
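
Every concrete repository now threads the registry from its plugin factory down to this base class. A sketch of what a third-party repository plugin looks like after this change — `MyRepositoryPlugin`, `MyRepository`, and `"myrepo"` are placeholders, not part of this patch:

[source,java]
--------------------------------------------------
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;

public class MyRepositoryPlugin extends Plugin implements RepositoryPlugin {
    @Override
    public Map<String, Repository.Factory> getRepositories(Environment env,
                                                           NamedXContentRegistry namedXContentRegistry) {
        // MyRepository extends BlobStoreRepository and passes the registry to
        // the new three-argument super constructor, exactly as FsRepository
        // and URLRepository do below.
        return Collections.singletonMap("myrepo",
                metadata -> new MyRepository(metadata, env, namedXContentRegistry));
    }
}
--------------------------------------------------
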
private final ChecksumBlobStoreFormat indexShardSnapshotsFormat; @@ -226,25 +225,29 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * @param metadata The metadata for this repository including name and settings * @param globalSettings Settings for the node this repository object is created on */ - protected BlobStoreRepository(RepositoryMetaData metadata, Settings globalSettings) { + protected BlobStoreRepository(RepositoryMetaData metadata, Settings globalSettings, NamedXContentRegistry namedXContentRegistry) { super(globalSettings); this.metadata = metadata; - parseFieldMatcher = new ParseFieldMatcher(settings); + this.namedXContentRegistry = namedXContentRegistry; snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); readOnly = metadata.settings().getAsBoolean("readonly", false); - indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress()); - indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots.PROTO, parseFieldMatcher, isCompress()); + indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, + BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, isCompress()); + indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, + BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, isCompress()); } @Override protected void doStart() { this.snapshotsBlobContainer = blobStore().blobContainer(basePath()); - ParseFieldMatcher parseFieldMatcher = new ParseFieldMatcher(settings); - globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT, MetaData.PROTO, parseFieldMatcher, isCompress()); - indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT, IndexMetaData.PROTO, parseFieldMatcher, isCompress()); - snapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, SnapshotInfo.PROTO, parseFieldMatcher, isCompress()); + globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT, + MetaData::fromXContent, namedXContentRegistry, isCompress()); + indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT, + IndexMetaData::fromXContent, namedXContentRegistry, isCompress()); + snapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, + SnapshotInfo::fromXContent, namedXContentRegistry, isCompress()); } @Override diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 17fad25e610..4cf46f38235 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; -import 
org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -33,7 +32,8 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; -import org.elasticsearch.common.xcontent.FromXContentBuilder; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.NamedXContentRegistry.FromXContent; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -73,8 +73,9 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm * @param compress true if the content should be compressed * @param xContentType content type that should be used for write operations */ - public ChecksumBlobStoreFormat(String codec, String blobNameFormat, FromXContentBuilder reader, ParseFieldMatcher parseFieldMatcher, boolean compress, XContentType xContentType) { - super(blobNameFormat, reader, parseFieldMatcher); + public ChecksumBlobStoreFormat(String codec, String blobNameFormat, FromXContent reader, + NamedXContentRegistry namedXContentRegistry, boolean compress, XContentType xContentType) { + super(blobNameFormat, reader, namedXContentRegistry); this.xContentType = xContentType; this.compress = compress; this.codec = codec; @@ -86,8 +87,9 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm * @param reader prototype object that can deserialize T from XContent * @param compress true if the content should be compressed */ - public ChecksumBlobStoreFormat(String codec, String blobNameFormat, FromXContentBuilder reader, ParseFieldMatcher parseFieldMatcher, boolean compress) { - this(codec, blobNameFormat, reader, parseFieldMatcher, compress, DEFAULT_X_CONTENT_TYPE); + public ChecksumBlobStoreFormat(String codec, String blobNameFormat, FromXContent reader, + NamedXContentRegistry namedXContentRegistry, boolean compress) { + this(codec, blobNameFormat, reader, namedXContentRegistry, compress, DEFAULT_X_CONTENT_TYPE); } /** diff --git a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index c028913d343..b490a2e784d 100644 --- a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.blobstore.fs.FsBlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -72,8 +73,9 @@ public class FsRepository extends BlobStoreRepository { /** * Constructs a shared file system repository. 
*/ - public FsRepository(RepositoryMetaData metadata, Environment environment) throws IOException { - super(metadata, environment.settings()); + public FsRepository(RepositoryMetaData metadata, Environment environment, + NamedXContentRegistry namedXContentRegistry) throws IOException { + super(metadata, environment.settings(), namedXContentRegistry); String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { logger.warn("the repository location is missing, it should point to a shared file system location that is available on all master and data nodes"); diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java index 5ca335c5953..fdeb27819bf 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.URIPattern; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -77,8 +78,9 @@ public class URLRepository extends BlobStoreRepository { /** * Constructs a read-only URL-based repository */ - public URLRepository(RepositoryMetaData metadata, Environment environment) throws IOException { - super(metadata, environment.settings()); + public URLRepository(RepositoryMetaData metadata, Environment environment, + NamedXContentRegistry namedXContentRegistry) throws IOException { + super(metadata, environment.settings(), namedXContentRegistry); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 44456dbec20..7c99f3a80dd 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -22,6 +22,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; @@ -45,7 +46,6 @@ import java.util.Map; public final class ScriptMetaData implements MetaData.Custom { public static final String TYPE = "stored_scripts"; - public static final ScriptMetaData PROTO = new ScriptMetaData(Collections.emptyMap()); private final Map scripts; @@ -108,12 +108,11 @@ public final class ScriptMetaData implements MetaData.Custom { } @Override - public String type() { + public String getWriteableName() { return TYPE; } - @Override - public ScriptMetaData fromXContent(XContentParser parser) throws IOException { + public static ScriptMetaData fromXContent(XContentParser parser) throws IOException { Map scripts = new HashMap<>(); String key = null; for (Token token = parser.nextToken(); token != Token.END_OBJECT; 
token = parser.nextToken()) { @@ -136,16 +135,14 @@ public final class ScriptMetaData implements MetaData.Custom { return MetaData.ALL_CONTEXTS; } - @Override - public ScriptMetaData readFrom(StreamInput in) throws IOException { + public ScriptMetaData(StreamInput in) throws IOException { int size = in.readVInt(); - Map scripts = new HashMap<>(); + this.scripts = new HashMap<>(); for (int i = 0; i < size; i++) { String languageAndId = in.readString(); BytesReference script = in.readBytesReference(); scripts.put(languageAndId, new ScriptAsBytes(script)); } - return new ScriptMetaData(scripts); } @Override @@ -170,8 +167,7 @@ public final class ScriptMetaData implements MetaData.Custom { return new ScriptMetadataDiff((ScriptMetaData) before, this); } - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new ScriptMetadataDiff(in); } @@ -237,7 +233,7 @@ public final class ScriptMetaData implements MetaData.Custom { } } - static final class ScriptMetadataDiff implements Diff { + static final class ScriptMetadataDiff implements NamedDiff { final Diff> pipelines; @@ -246,7 +242,8 @@ public final class ScriptMetaData implements MetaData.Custom { } public ScriptMetadataDiff(StreamInput in) throws IOException { - pipelines = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), new ScriptAsBytes(null)); + pipelines = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), ScriptAsBytes::new, + ScriptAsBytes::readDiffFrom); } @Override @@ -258,6 +255,11 @@ public final class ScriptMetaData implements MetaData.Custom { public void writeTo(StreamOutput out) throws IOException { pipelines.writeTo(out); } + + @Override + public String getWriteableName() { + return TYPE; + } } static final class ScriptAsBytes extends AbstractDiffable { @@ -273,9 +275,12 @@ public final class ScriptMetaData implements MetaData.Custom { out.writeBytesReference(script); } - @Override - public ScriptAsBytes readFrom(StreamInput in) throws IOException { - return new ScriptAsBytes(in.readBytesReference()); + public ScriptAsBytes(StreamInput in) throws IOException { + this(in.readBytesReference()); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(ScriptAsBytes::new, in); } @Override diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index ddcee4b0353..e6934f91fa1 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; -import org.elasticsearch.common.xcontent.FromXContentBuilder; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -42,9 +41,8 @@ import java.util.Objects; /** * Information about a snapshot */ -public final class SnapshotInfo implements Comparable, ToXContent, FromXContentBuilder, Writeable { +public final class SnapshotInfo implements Comparable, ToXContent, Writeable { - public static final SnapshotInfo PROTO = new SnapshotInfo(new SnapshotId("", ""), Collections.emptyList(), 0); public static 
final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; private static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("strictDateOptionalTime"); @@ -373,11 +371,6 @@ public final class SnapshotInfo implements Comparable, ToXContent, return builder; } - @Override - public SnapshotInfo fromXContent(final XContentParser parser, final ParseFieldMatcher matcher) throws IOException { - return fromXContent(parser); - } - /** * This method creates a SnapshotInfo from internal x-content. It does not * handle x-content written with the external version as external x-content diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index dd87019ee47..fb9d7babf20 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -44,6 +44,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.hash.MurmurHash3; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; @@ -61,6 +65,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.TransportSettings; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -207,10 +212,13 @@ public class TribeService extends AbstractLifecycleComponent { private final List nodes = new CopyOnWriteArrayList<>(); + private final NamedWriteableRegistry namedWriteableRegistry; + public TribeService(Settings settings, ClusterService clusterService, final String tribeNodeId, - Function clientNodeBuilder) { + NamedWriteableRegistry namedWriteableRegistry, Function clientNodeBuilder) { super(settings); this.clusterService = clusterService; + this.namedWriteableRegistry = namedWriteableRegistry; Map nodesSettings = new HashMap<>(settings.getGroups("tribe", true)); nodesSettings.remove("blocks"); // remove prefix settings that don't indicate a client nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client @@ -512,7 +520,7 @@ public class TribeService extends AbstractLifecycleComponent { .map(ClusterState::metaData) .map(clusterMetaData -> ((MetaData.Custom) clusterMetaData.custom(customMetaDataType))) .filter(custom1 -> custom1 != null && custom1 instanceof MergableCustomMetaData) - .map(custom2 -> (MergableCustomMetaData) custom2) + .map(custom2 -> (MergableCustomMetaData) marshal(custom2)) .collect(Collectors.toList()) ); for (String changedCustomMetaDataType : changedCustomMetaDataTypeSet) { @@ -577,4 +585,21 @@ public class TribeService extends AbstractLifecycleComponent { } return changedCustomMetaDataMap; } + + /** + * Since custom metadata can be loaded by a plugin class loader that resides in a sub-node, we need to + * marshal this object into something the tribe node can work with + */ + private MetaData.Custom marshal(MetaData.Custom custom) { + try (BytesStreamOutput bytesStreamOutput = new 
BytesStreamOutput()){ + bytesStreamOutput.writeNamedWriteable(custom); + try(StreamInput input = bytesStreamOutput.bytes().streamInput()) { + StreamInput namedInput = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry); + MetaData.Custom marshaled = namedInput.readNamedWriteable(MetaData.Custom.class); + return marshaled; + } + } catch (IOException ex) { + throw new IllegalStateException("cannot marshal object with type " + custom.getWriteableName() + " to tribe node"); + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 43adf182c8b..d8e1fea2848 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -171,7 +172,8 @@ public abstract class TaskManagerTestCase extends ESTestCase { clusterService = createClusterService(threadPool); transportService = new TransportService(settings, new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), - new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(settings, Collections.emptyList())), + new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), + new NetworkService(settings, Collections.emptyList())), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null) { @Override protected TaskManager createTaskManager() { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 6657a307908..20add0530d8 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -544,7 +544,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); Path nodeDir = getNodeDir(indexFile); logger.info("Parsing cluster state files from index [{}]", indexName); - final MetaData metaData = globalFormat.loadLatestState(logger, nodeDir); + final MetaData metaData = globalFormat.loadLatestState(logger, xContentRegistry(), nodeDir); assertNotNull(metaData); final Version version = Version.fromString(indexName.substring("index-".length())); @@ -555,7 +555,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { dataDir = nodeDir.getParent(); } final Path indexDir = getIndexDir(logger, indexName, indexFile, dataDir); - assertNotNull(indexFormat.loadLatestState(logger, indexDir)); + assertNotNull(indexFormat.loadLatestState(logger, xContentRegistry(), indexDir)); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java 
b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 63d34f683de..240add84464 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -244,7 +244,7 @@ public class ClusterChangedEventTests extends ESTestCase { event = new ClusterChangedEvent("_na_", originalState, nextState); Set changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet(); assertTrue(changedCustomMetaDataTypeSet.size() == 1); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.getWriteableName())); // next state has same custom metadata nextState = nextState(originalState, Collections.singletonList(customMetaData1)); @@ -263,14 +263,14 @@ public class ClusterChangedEventTests extends ESTestCase { event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState); changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet(); assertTrue(changedCustomMetaDataTypeSet.size() == 1); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.getWriteableName())); // next state updates custom metadata nextState = nextState(stateWithCustomMetaData, Collections.singletonList(new CustomMetaData1("data1"))); event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState); changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet(); assertTrue(changedCustomMetaDataTypeSet.size() == 1); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.getWriteableName())); // next state adds new custom metadata type CustomMetaData2 customMetaData2 = new CustomMetaData2("data2"); @@ -278,15 +278,15 @@ public class ClusterChangedEventTests extends ESTestCase { event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState); changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet(); assertTrue(changedCustomMetaDataTypeSet.size() == 1); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.type())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.getWriteableName())); // next state adds two custom metadata type nextState = nextState(originalState, Arrays.asList(customMetaData1, customMetaData2)); event = new ClusterChangedEvent("_na_", originalState, nextState); changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet(); assertTrue(changedCustomMetaDataTypeSet.size() == 2); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.type())); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.getWriteableName())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.getWriteableName())); // next state removes two custom metadata type nextState = originalState; @@ -294,25 +294,17 @@ public class ClusterChangedEventTests extends ESTestCase { nextState(originalState, Arrays.asList(customMetaData1, customMetaData2)), nextState); changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet(); assertTrue(changedCustomMetaDataTypeSet.size() == 2); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.type())); - assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type())); + 
assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.getWriteableName())); + assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.getWriteableName())); } private static class CustomMetaData2 extends TestCustomMetaData { - static { - MetaData.registerPrototype("2", new CustomMetaData2("")); - } protected CustomMetaData2(String data) { super(data); } @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new CustomMetaData2(data); - } - - @Override - public String type() { + public String getWriteableName() { return "2"; } @@ -323,20 +315,12 @@ public class ClusterChangedEventTests extends ESTestCase { } private static class CustomMetaData1 extends TestCustomMetaData { - static { - MetaData.registerPrototype("1", new CustomMetaData1("")); - } protected CustomMetaData1(String data) { super(data); } @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new CustomMetaData1(data); - } - - @Override - public String type() { + public String getWriteableName() { return "1"; } @@ -378,7 +362,7 @@ public class ClusterChangedEventTests extends ESTestCase { } } for (TestCustomMetaData testCustomMetaData : customMetaDataList) { - metaDataBuilder.putCustom(testCustomMetaData.type(), testCustomMetaData); + metaDataBuilder.putCustom(testCustomMetaData.getWriteableName(), testCustomMetaData); } builder.metaData(metaDataBuilder); return builder.build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 9fdbf13fc8c..722f9590b56 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -31,6 +31,8 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -75,13 +77,15 @@ import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0, numClientNodes = 0) public class ClusterStateDiffIT extends ESIntegTestCase { public void testClusterStateDiffSerialization() throws Exception { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); DiscoveryNode masterNode = new DiscoveryNode("master", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); DiscoveryNode otherNode = new DiscoveryNode("other", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode); + ClusterState clusterStateFromDiffs = + ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode, namedWriteableRegistry); int iterationCount = randomIntBetween(10, 300); for 
(int iteration = 0; iteration < iterationCount; iteration++) { @@ -117,7 +121,8 @@ public class ClusterStateDiffIT extends ESIntegTestCase { if (randomIntBetween(0, 10) < 1) { // Update cluster state via full serialization from time to time - clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().getLocalNode()); + clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), + previousClusterStateFromDiffs.nodes().getLocalNode(), namedWriteableRegistry); } else { // Update cluster states using diffs Diff diffBeforeSerialization = clusterState.diff(previousClusterState); @@ -126,7 +131,8 @@ public class ClusterStateDiffIT extends ESIntegTestCase { byte[] diffBytes = BytesReference.toBytes(os.bytes()); Diff diff; try (StreamInput input = StreamInput.wrap(diffBytes)) { - diff = previousClusterStateFromDiffs.readDiffFrom(input); + StreamInput namedInput = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry); + diff = ClusterState.readDiffFrom(namedInput, previousClusterStateFromDiffs.nodes().getLocalNode()); clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs); } } @@ -618,7 +624,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase { @Override public MetaData.Builder put(MetaData.Builder builder, MetaData.Custom part) { - return builder.putCustom(part.type(), part); + return builder.putCustom(part.getWriteableName(), part); } @Override @@ -660,7 +666,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase { @Override public ClusterState.Builder put(ClusterState.Builder builder, ClusterState.Custom part) { - return builder.putCustom(part.type(), part); + return builder.putCustom(part.getWriteableName(), part); } @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java index 0ecde102e80..ea5c55d9a0d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java @@ -57,7 +57,7 @@ public class IndexGraveyardTests extends ESTestCase { final IndexGraveyard graveyard = createRandom(); final BytesStreamOutput out = new BytesStreamOutput(); graveyard.writeTo(out); - assertThat(IndexGraveyard.fromStream(out.bytes().streamInput()), equalTo(graveyard)); + assertThat(new IndexGraveyard(out.bytes().streamInput()), equalTo(graveyard)); } public void testXContent() throws IOException { @@ -68,7 +68,7 @@ public class IndexGraveyardTests extends ESTestCase { builder.endObject(); XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); parser.nextToken(); // the beginning of the parser - assertThat(IndexGraveyard.PROTO.fromXContent(parser), equalTo(graveyard)); + assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard)); } public void testAddTombstones() { diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index caeb889800c..7b11f96ac4d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -53,7 +53,7 @@ public class IndexMetaDataTests extends ESTestCase { metaData.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); XContentParser 
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java
index 0ecde102e80..ea5c55d9a0d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java
@@ -57,7 +57,7 @@ public class IndexGraveyardTests extends ESTestCase {
         final IndexGraveyard graveyard = createRandom();
         final BytesStreamOutput out = new BytesStreamOutput();
         graveyard.writeTo(out);
-        assertThat(IndexGraveyard.fromStream(out.bytes().streamInput()), equalTo(graveyard));
+        assertThat(new IndexGraveyard(out.bytes().streamInput()), equalTo(graveyard));
     }
 
     public void testXContent() throws IOException {
@@ -68,7 +68,7 @@ public class IndexGraveyardTests extends ESTestCase {
         builder.endObject();
         XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes());
         parser.nextToken(); // the beginning of the parser
-        assertThat(IndexGraveyard.PROTO.fromXContent(parser), equalTo(graveyard));
+        assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard));
     }
 
     public void testAddTombstones() {
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
index caeb889800c..7b11f96ac4d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java
@@ -53,7 +53,7 @@ public class IndexMetaDataTests extends ESTestCase {
         metaData.toXContent(builder, ToXContent.EMPTY_PARAMS);
         builder.endObject();
         XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes());
-        final IndexMetaData fromXContentMeta = IndexMetaData.PROTO.fromXContent(parser, null);
+        final IndexMetaData fromXContentMeta = IndexMetaData.fromXContent(parser);
         assertEquals(metaData, fromXContentMeta);
         assertEquals(metaData.hashCode(), fromXContentMeta.hashCode());
 
@@ -67,7 +67,7 @@ public class IndexMetaDataTests extends ESTestCase {
 
         final BytesStreamOutput out = new BytesStreamOutput();
         metaData.writeTo(out);
-        IndexMetaData deserialized = IndexMetaData.PROTO.readFrom(out.bytes().streamInput());
+        IndexMetaData deserialized = IndexMetaData.readFrom(out.bytes().streamInput());
         assertEquals(metaData, deserialized);
         assertEquals(metaData.hashCode(), deserialized.hashCode());
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java
index 5265a7548a6..bfc6f5d78d2 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java
@@ -58,7 +58,7 @@ public class IndexTemplateMetaDataTests extends ESTestCase {
 
         try (StreamInput in = bytes.streamInput()) {
             in.setVersion(Version.V_5_0_0);
-            IndexTemplateMetaData readMetaData = IndexTemplateMetaData.Builder.readFrom(in);
+            IndexTemplateMetaData readMetaData = IndexTemplateMetaData.readFrom(in);
             assertEquals(0, in.available());
             assertEquals(metaData.getName(), readMetaData.getName());
             assertEquals(metaData.getPatterns(), readMetaData.getPatterns());
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
index 4d248adf1e6..6cc6a1cb54c 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -20,9 +20,13 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -173,7 +177,7 @@ public class MetaDataTests extends ESTestCase {
         originalMeta.toXContent(builder, ToXContent.EMPTY_PARAMS);
         builder.endObject();
         XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes());
-        final MetaData fromXContentMeta = MetaData.PROTO.fromXContent(parser, null);
+        final MetaData fromXContentMeta = MetaData.fromXContent(parser);
         assertThat(fromXContentMeta.indexGraveyard(), equalTo(originalMeta.indexGraveyard()));
     }
 
@@ -182,7 +186,10 @@ public class MetaDataTests extends ESTestCase {
         final MetaData originalMeta = MetaData.builder().indexGraveyard(graveyard).build();
         final BytesStreamOutput out = new BytesStreamOutput();
         originalMeta.writeTo(out);
-        final MetaData fromStreamMeta = MetaData.PROTO.readFrom(out.bytes().streamInput());
+        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
+        final MetaData fromStreamMeta = MetaData.readFrom(
+            new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)
+        );
         assertThat(fromStreamMeta.indexGraveyard(), equalTo(fromStreamMeta.indexGraveyard()));
     }
 }
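The same wrapping applies to full metadata round trips: MetaData.readFrom replaces MetaData.PROTO.readFrom, and the input must be registry-aware so custom sections can be looked up by name. A minimal sketch, assuming the default cluster-module registry covers the customs in play:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

public final class MetaDataRoundTrip {

    // Writes MetaData to a buffer and reads it back through a
    // NamedWriteableAwareStreamInput, mirroring the updated test.
    public static MetaData roundTrip(MetaData metaData) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        metaData.writeTo(out);
        NamedWriteableRegistry registry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
        return MetaData.readFrom(new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry));
    }
}
--------------------------------------------------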
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
index 00d9a8ff096..d6252fd4366 100644
--- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.cluster.serialization;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ESAllocationTestCase;
@@ -29,9 +30,12 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 
+import java.util.Collections;
+
 import static org.hamcrest.Matchers.equalTo;
 
 public class ClusterSerializationTests extends ESAllocationTestCase {
@@ -52,7 +56,8 @@ public class ClusterSerializationTests extends ESAllocationTestCase {
         AllocationService strategy = createAllocationService();
         clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build();
 
-        ClusterState serializedClusterState = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), newNode("node1"));
+        ClusterState serializedClusterState = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), newNode("node1"),
+            new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
 
         assertThat(serializedClusterState.getClusterName().value(), equalTo(clusterState.getClusterName().value()));
 
@@ -79,7 +84,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase {
         BytesStreamOutput outStream = new BytesStreamOutput();
         source.writeTo(outStream);
         StreamInput inStream = outStream.bytes().streamInput();
-        RoutingTable target = RoutingTable.Builder.readFrom(inStream);
+        RoutingTable target = RoutingTable.readFrom(inStream);
         assertThat(target.toString(), equalTo(source.toString()));
     }
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
index 611c261e334..4475aed7dc3 100644
--- a/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
@@ -65,7 +65,7 @@ public class DiffableTests extends ESTestCase {
             @Override
             protected MapDiff readDiff(StreamInput in) throws IOException {
                 return useProtoForDiffableSerialization
-                        ? DiffableUtils.readJdkMapDiff(in, keySerializer, TestDiffable.PROTO)
+                        ? DiffableUtils.readJdkMapDiff(in, keySerializer, TestDiffable::readFrom, TestDiffable::readDiffFrom)
                        : DiffableUtils.readJdkMapDiff(in, keySerializer, diffableValueSerializer());
             }
         }.execute();
@@ -113,7 +113,7 @@ public class DiffableTests extends ESTestCase {
             @Override
             protected MapDiff readDiff(StreamInput in) throws IOException {
                 return useProtoForDiffableSerialization
-                        ? DiffableUtils.readImmutableOpenMapDiff(in, keySerializer, TestDiffable.PROTO)
+                        ? DiffableUtils.readImmutableOpenMapDiff(in, keySerializer, TestDiffable::readFrom, TestDiffable::readDiffFrom)
                        : DiffableUtils.readImmutableOpenMapDiff(in, keySerializer, diffableValueSerializer());
             }
         }.execute();
@@ -161,7 +161,7 @@ public class DiffableTests extends ESTestCase {
             @Override
             protected MapDiff readDiff(StreamInput in) throws IOException {
                 return useProtoForDiffableSerialization
-                        ? DiffableUtils.readImmutableOpenIntMapDiff(in, keySerializer, TestDiffable.PROTO)
+                        ? DiffableUtils.readImmutableOpenIntMapDiff(in, keySerializer, TestDiffable::readFrom, TestDiffable::readDiffFrom)
                        : DiffableUtils.readImmutableOpenIntMapDiff(in, keySerializer, diffableValueSerializer());
             }
         }.execute();
@@ -398,7 +398,7 @@ public class DiffableTests extends ESTestCase {
 
             @Override
             public Diff readDiff(StreamInput in, K key) throws IOException {
-                return AbstractDiffable.readDiffFrom(TestDiffable.PROTO, in);
+                return AbstractDiffable.readDiffFrom(TestDiffable::readFrom, in);
             }
         };
     }
@@ -419,8 +419,6 @@ public class DiffableTests extends ESTestCase {
 
     public static class TestDiffable extends AbstractDiffable<TestDiffable> {
 
-        public static final TestDiffable PROTO = new TestDiffable("");
-
         private final String value;
 
         public TestDiffable(String value) {
@@ -431,11 +429,14 @@ public class DiffableTests extends ESTestCase {
             return value;
         }
 
-        @Override
-        public TestDiffable readFrom(StreamInput in) throws IOException {
+        public static TestDiffable readFrom(StreamInput in) throws IOException {
             return new TestDiffable(in.readString());
         }
 
+        public static Diff<TestDiffable> readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TestDiffable::readFrom, in);
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(value);
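DiffableTests shows the replacement for the PROTO pattern in its smallest form: deserialization entry points become static factory methods that the diff machinery receives as method references. A self-contained sketch of such a class, modeled directly on TestDiffable above:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// No PROTO constant: readers are static and get passed around as
// SimpleDiffable::readFrom / SimpleDiffable::readDiffFrom.
public class SimpleDiffable extends AbstractDiffable<SimpleDiffable> {

    private final String value;

    public SimpleDiffable(String value) {
        this.value = value;
    }

    public static SimpleDiffable readFrom(StreamInput in) throws IOException {
        return new SimpleDiffable(in.readString());
    }

    public static Diff<SimpleDiffable> readDiffFrom(StreamInput in) throws IOException {
        // delegates to the AbstractDiffable helper that takes a reader function
        return readDiffFrom(SimpleDiffable::readFrom, in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }
}
--------------------------------------------------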
diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java
index 916926e36a4..581d58d896d 100644
--- a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java
@@ -29,6 +29,7 @@ import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.gateway.MetaDataStateFormat;
@@ -223,7 +224,7 @@ public class IndexFolderUpgraderTests extends ESTestCase {
         assertEquals(indexFolders.size(), 1);
 
         // ensure index metadata is moved
-        IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger,
+        IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY,
             nodeEnvironment.resolveIndexFolder(indexFolders.iterator().next()));
         assertNotNull(indexMetaData);
         Index index = indexMetaData.getIndex();
@@ -265,7 +266,8 @@ public class IndexFolderUpgraderTests extends ESTestCase {
                                       int numIdxFiles, int numTranslogFiles) throws IOException {
         final Index index = indexSettings.getIndex();
         // ensure index state can be loaded
-        IndexMetaData loadLatestState = IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.indexPaths(index));
+        IndexMetaData loadLatestState = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY,
+            nodeEnv.indexPaths(index));
         assertNotNull(loadLatestState);
         assertEquals(loadLatestState.getIndex(), index);
         for (int shardId = 0; shardId < indexSettings.getNumberOfShards(); shardId++) {
diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
index a2001504f19..378557b4047 100644
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
@@ -31,6 +31,7 @@ import java.util.function.Supplier;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
@@ -55,6 +56,7 @@ import static org.mockito.Mockito.when;
 public class DiscoveryModuleTests extends ESTestCase {
 
     private TransportService transportService;
+    private NamedWriteableRegistry namedWriteableRegistry;
     private ClusterService clusterService;
     private ThreadPool threadPool;
 
@@ -71,6 +73,7 @@ public class DiscoveryModuleTests extends ESTestCase {
         Map<String, Supplier<Discovery>> impl();
         @Override
         default Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
+                                                                   NamedWriteableRegistry namedWriteableRegistry,
                                                                    ClusterService clusterService, UnicastHostsProvider hostsProvider) {
             return impl();
         }
@@ -80,6 +83,7 @@ public class DiscoveryModuleTests extends ESTestCase {
     public void setupDummyServices() {
         transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, null, null);
         clusterService = mock(ClusterService.class);
+        namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
         ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
         threadPool = mock(ThreadPool.class);
@@ -91,7 +95,7 @@ public class DiscoveryModuleTests extends ESTestCase {
     }
 
     private DiscoveryModule newModule(Settings settings, List<DiscoveryPlugin> plugins) {
-        return new DiscoveryModule(settings, threadPool, transportService, null, clusterService, plugins);
+        return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, clusterService, plugins);
     }
 
     public void testDefaults() {
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java
index e1d2a226a02..5b6668c6f5e 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java
@@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
@@ -36,6 +37,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
@@ -248,9 +250,11 @@ public class PublishClusterStateActionTests extends ESTestCase {
     ) {
         DiscoverySettings discoverySettings =
                 new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
+        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
         return new MockPublishAction(
                 settings,
                 transportService,
+                namedWriteableRegistry,
                 clusterStateSupplier,
                 listener,
                 discoverySettings,
@@ -873,10 +877,10 @@ public class PublishClusterStateActionTests extends ESTestCase {
         AtomicBoolean timeoutOnCommit = new AtomicBoolean();
         AtomicBoolean errorOnCommit = new AtomicBoolean();
 
-        public MockPublishAction(Settings settings, TransportService transportService,
+        public MockPublishAction(Settings settings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry,
                                  Supplier<ClusterState> clusterStateSupplier, NewPendingClusterStateListener listener,
                                  DiscoverySettings discoverySettings, ClusterName clusterName) {
-            super(settings, transportService, clusterStateSupplier, listener, discoverySettings, clusterName);
+            super(settings, transportService, namedWriteableRegistry, clusterStateSupplier, listener, discoverySettings, clusterName);
         }
 
         @Override
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
index cc8d43cc79e..7670e8122f3 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
@@ -236,12 +236,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
         }
 
         @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new CustomMetaData(data);
-        }
-
-        @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index b46df47afbc..af4b2b826f5 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -36,6 +36,7 @@ import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
 import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -50,6 +51,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.TestShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.Discovery;
@@ -289,7 +291,8 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
     }
 
     private ZenDiscovery buildZenDiscovery(Settings settings, TransportService service, ClusterService clusterService, ThreadPool threadPool) {
-        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService, Collections::emptyList);
+        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()),
+            clusterService, Collections::emptyList);
         zenDiscovery.start();
         return zenDiscovery;
     }
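ZenDiscovery now receives the registry at construction time, between the transport service and the cluster service. A minimal factory sketch, assuming the caller wires up the other services exactly as the test above does:

[source,java]
--------------------------------------------------
import java.util.Collections;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public final class ZenDiscoveryFactory {

    // Builds and starts a ZenDiscovery instance; Collections::emptyList stands
    // in for a UnicastHostsProvider with no configured hosts, as in the test.
    public static ZenDiscovery build(Settings settings, ThreadPool threadPool,
                                     TransportService transportService, ClusterService clusterService) {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, transportService, registry,
            clusterService, Collections::emptyList);
        zenDiscovery.start();
        return zenDiscovery;
    }
}
--------------------------------------------------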
diff --git a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
index 91d6d3f08fe..4502e32e6d3 100644
--- a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
@@ -47,7 +47,7 @@ public class DanglingIndicesStateTests extends ESTestCase {
 
     public void testCleanupWhenEmpty() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
             DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
 
             assertTrue(danglingState.getDanglingIndices().isEmpty());
@@ -58,7 +58,7 @@ public class DanglingIndicesStateTests extends ESTestCase {
     }
     public void testDanglingIndicesDiscovery() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
             DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
 
             assertTrue(danglingState.getDanglingIndices().isEmpty());
@@ -76,7 +76,7 @@ public class DanglingIndicesStateTests extends ESTestCase {
 
     public void testInvalidIndexFolder() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
             DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
 
             MetaData metaData = MetaData.builder().build();
@@ -100,7 +100,7 @@ public class DanglingIndicesStateTests extends ESTestCase {
     public void testDanglingProcessing() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
             DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
 
             MetaData metaData = MetaData.builder().build();
@@ -144,7 +144,7 @@ public class DanglingIndicesStateTests extends ESTestCase {
 
     public void testDanglingIndicesNotImportedWhenTombstonePresent() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
             DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
 
             final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
index f5a5391436a..5c5826adf67 100644
--- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
@@ -61,12 +61,6 @@
  */
 public class GatewayMetaStateTests extends ESAllocationTestCase {
 
-    @Before
-    public void setup() {
-        MetaData.registerPrototype(CustomMetaData1.TYPE, new CustomMetaData1(""));
-        MetaData.registerPrototype(CustomMetaData2.TYPE, new CustomMetaData2(""));
-    }
-
     ClusterChangedEvent generateEvent(boolean initializing, boolean versionChanged, boolean masterEligible) {
         //ridiculous settings to make sure we don't run into uninitialized because fo default
         AllocationService strategy = createAllocationService(Settings.builder()
@@ -409,12 +403,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
         }
 
         @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new CustomMetaData1(data);
-        }
-
-        @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
@@ -432,12 +421,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
         }
 
         @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new CustomMetaData2(data);
-        }
-
-        @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
@@ -450,7 +434,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
     private static MetaData randomMetaData(TestCustomMetaData... customMetaDatas) {
         MetaData.Builder builder = MetaData.builder();
         for (TestCustomMetaData customMetaData : customMetaDatas) {
-            builder.putCustom(customMetaData.type(), customMetaData);
+            builder.putCustom(customMetaData.getWriteableName(), customMetaData);
         }
         for (int i = 0; i < randomIntBetween(1, 5); i++) {
             builder.put(
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java
index 0f0e69b2643..e507cfa7798 100644
--- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -89,7 +90,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
         assertThat(resource, notNullValue());
         Path dst = tmp.resolve("global-3.st");
         Files.copy(resource, dst);
-        MetaData read = format.read(dst);
+        MetaData read = format.read(xContentRegistry(), dst);
         assertThat(read, notNullValue());
         assertThat(read.clusterUUID(), equalTo("3O1tDF1IRB6fSJ-GrTMUtg"));
         // indices are empty since they are serialized separately
@@ -114,7 +115,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
         list = content("foo-*", stateDir);
         assertEquals(list.length, 1);
         assertThat(list[0].getFileName().toString(), equalTo("foo-" + id + ".st"));
-        DummyState read = format.read(list[0]);
+        DummyState read = format.read(NamedXContentRegistry.EMPTY, list[0]);
         assertThat(read, equalTo(state));
     }
         final int version2 = between(version, Integer.MAX_VALUE);
@@ -130,7 +131,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
         list = content("foo-*", stateDir);
         assertEquals(list.length,1);
         assertThat(list[0].getFileName().toString(), equalTo("foo-"+ (id+1) + ".st"));
-        DummyState read = format.read(list[0]);
+        DummyState read = format.read(NamedXContentRegistry.EMPTY, list[0]);
         assertThat(read, equalTo(state2));
 
     }
@@ -156,7 +157,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
         list = content("foo-*", stateDir);
         assertEquals(list.length, 1);
         assertThat(list[0].getFileName().toString(), equalTo("foo-" + id + ".st"));
-        DummyState read = format.read(list[0]);
+        DummyState read = format.read(NamedXContentRegistry.EMPTY, list[0]);
         assertThat(read, equalTo(state));
     }
 }
@@ -180,12 +181,12 @@ public class MetaDataStateFormatTests extends ESTestCase {
         list = content("foo-*", stateDir);
         assertEquals(list.length, 1);
         assertThat(list[0].getFileName().toString(), equalTo("foo-" + id + ".st"));
-        DummyState read = format.read(list[0]);
+        DummyState read = format.read(NamedXContentRegistry.EMPTY, list[0]);
         assertThat(read, equalTo(state));
         // now corrupt it
         corruptFile(list[0], logger);
         try {
-            format.read(list[0]);
+            format.read(NamedXContentRegistry.EMPTY, list[0]);
             fail("corrupted file");
         } catch (CorruptStateException ex) {
             // expected
@@ -272,7 +273,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
         }
         List dirList = Arrays.asList(dirs);
         Collections.shuffle(dirList, random());
-        MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0]));
+        MetaData loadedMetaData = format.loadLatestState(logger, xContentRegistry(), dirList.toArray(new Path[0]));
         MetaData latestMetaData = meta.get(numStates-1);
         assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_")));
         assertThat(loadedMetaData.clusterUUID(), equalTo(latestMetaData.clusterUUID()));
@@ -299,7 +300,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
             MetaDataStateFormatTests.corruptFile(file, logger);
         }
         try {
-            format.loadLatestState(logger, dirList.toArray(new Path[0]));
+            format.loadLatestState(logger, xContentRegistry(), dirList.toArray(new Path[0]));
             fail("latest version can not be read");
         } catch (ElasticsearchException ex) {
             assertThat(ExceptionsHelper.unwrap(ex, CorruptStateException.class), notNullValue());
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
index 62f040c0163..ec7607085c0 100644
--- a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
@@ -39,7 +39,7 @@ public class MetaStateServiceTests extends ESTestCase {
 
     public void testWriteLoadIndex() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
 
             IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
             metaStateService.writeIndex("test_write", index);
@@ -49,14 +49,14 @@ public class MetaStateServiceTests extends ESTestCase {
 
     public void testLoadMissingIndex() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
             assertThat(metaStateService.loadIndexState(new Index("test1", "test1UUID")), nullValue());
         }
     }
 
     public void testWriteLoadGlobal() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
 
             MetaData metaData = MetaData.builder()
                     .persistentSettings(Settings.builder().put("test1", "value1").build())
@@ -68,7 +68,7 @@ public class MetaStateServiceTests extends ESTestCase {
 
     public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
 
             MetaData metaData = MetaData.builder()
                     .persistentSettings(Settings.builder().put("test1", "value1").build())
@@ -84,7 +84,7 @@ public class MetaStateServiceTests extends ESTestCase {
 
     public void testLoadGlobal() throws Exception {
         try (NodeEnvironment env = newNodeEnvironment()) {
-            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+            MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry());
 
             IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
             MetaData metaData = MetaData.builder()
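Every MetaDataStateFormat read path now takes a NamedXContentRegistry. A small helper sketch, assuming NamedXContentRegistry.EMPTY is acceptable because no custom metadata sections need to be parsed:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.nio.file.Path;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

public final class IndexMetaDataLoader {

    // Loads the newest on-disk index metadata; pass a real registry instead of
    // EMPTY whenever plugin-provided custom sections may be present.
    public static IndexMetaData load(Logger logger, Path... indexPaths) throws IOException {
        return IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, indexPaths);
    }
}
--------------------------------------------------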
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index a7620901826..8816baceb00 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -2158,7 +2158,7 @@ public class InternalEngineTests extends ESTestCase {
         final long size = Files.size(tlogFile);
         logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size);
         Directory directory = newFSDirectory(src.resolve("0").resolve("index"));
-        final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, src);
+        final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), src);
         final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData);
         final Store store = createStore(indexSettings, directory);
         final int iters = randomIntBetween(0, 2);
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 972232debaa..ed81c237677 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -62,6 +62,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.VersionType;
@@ -139,7 +140,7 @@ import static org.hamcrest.Matchers.nullValue;
 public class IndexShardTests extends IndexShardTestCase {
 
     public static ShardStateMetaData load(Logger logger, Path... shardPaths) throws IOException {
-        return ShardStateMetaData.FORMAT.loadLatestState(logger, shardPaths);
+        return ShardStateMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, shardPaths);
     }
 
     public static void write(ShardStateMetaData shardStateMetaData,
diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java b/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
index 8d36598b4d9..e2fca8b4112 100644
--- a/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
@@ -58,7 +58,7 @@ public class IngestMetadataTests extends ESTestCase {
         builder.endObject();
         XContentBuilder shuffled = shuffleXContent(builder);
         final XContentParser parser = createParser(shuffled);
-        MetaData.Custom custom = ingestMetadata.fromXContent(parser);
+        MetaData.Custom custom = IngestMetadata.fromXContent(parser);
         assertTrue(custom instanceof IngestMetadata);
         IngestMetadata m = (IngestMetadata) custom;
         assertEquals(2, m.getPipelines().size());
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java
index 47b6d87d7db..4b2b9a49dc0 100644
--- a/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java
@@ -80,7 +80,7 @@ public class ScriptMetaDataTests extends ESTestCase {
 
         XContentParser parser = createParser(xContentBuilder);
         parser.nextToken();
-        ScriptMetaData result = ScriptMetaData.PROTO.fromXContent(parser);
+        ScriptMetaData result = ScriptMetaData.fromXContent(parser);
         assertEquals(expected, result);
         assertEquals(expected.hashCode(), result.hashCode());
     }
@@ -90,7 +90,7 @@ public class ScriptMetaDataTests extends ESTestCase {
 
         ByteArrayOutputStream out = new ByteArrayOutputStream();
         expected.writeTo(new OutputStreamStreamOutput(out));
-        ScriptMetaData result = ScriptMetaData.PROTO.readFrom(new InputStreamStreamInput(new ByteArrayInputStream(out.toByteArray())));
+        ScriptMetaData result = new ScriptMetaData(new InputStreamStreamInput(new ByteArrayInputStream(out.toByteArray())));
         assertEquals(expected, result);
         assertEquals(expected.hashCode(), result.hashCode());
     }
diff --git a/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java
index 617fcf9ebc3..7e1793bd05c 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java
@@ -21,7 +21,6 @@ package org.elasticsearch.snapshots;
 
 import org.elasticsearch.ElasticsearchCorruptionException;
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -32,7 +31,6 @@ import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.FromXContentBuilder;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -56,12 +54,9 @@ import static org.hamcrest.Matchers.greaterThan;
 
 public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
 
-    private static final ParseFieldMatcher parseFieldMatcher = new ParseFieldMatcher(Settings.EMPTY);
-
     public static final String BLOB_CODEC = "blob";
 
-    private static class BlobObj implements ToXContent, FromXContentBuilder<BlobObj> {
-        public static final BlobObj PROTO = new BlobObj("");
+    private static class BlobObj implements ToXContent {
 
         private final String text;
 
@@ -73,8 +68,7 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
             return text;
         }
 
-        @Override
-        public BlobObj fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
+        public static BlobObj fromXContent(XContentParser parser) throws IOException {
             String text = null;
             XContentParser.Token token = parser.currentToken();
             if (token == null) {
@@ -114,9 +108,12 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
     public void testBlobStoreOperations() throws IOException {
         BlobStore blobStore = createTestBlobStore();
         BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath());
-        ChecksumBlobStoreFormat checksumJSON = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.JSON);
-        ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.SMILE);
-        ChecksumBlobStoreFormat checksumSMILECompressed = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, true, XContentType.SMILE);
+        ChecksumBlobStoreFormat<BlobObj> checksumJSON = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), false, XContentType.JSON);
+        ChecksumBlobStoreFormat<BlobObj> checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), false, XContentType.SMILE);
+        ChecksumBlobStoreFormat<BlobObj> checksumSMILECompressed = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), true, XContentType.SMILE);
 
         // Write blobs in different formats
         checksumJSON.write(new BlobObj("checksum json"), blobContainer, "check-json");
@@ -139,8 +136,10 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
         for (int i = 0; i < randomIntBetween(100, 300); i++) {
             veryRedundantText.append("Blah ");
         }
-        ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, randomBoolean() ? XContentType.SMILE : XContentType.JSON);
-        ChecksumBlobStoreFormat checksumFormatComp = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, true, randomBoolean() ? XContentType.SMILE : XContentType.JSON);
+        ChecksumBlobStoreFormat<BlobObj> checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), false, randomBoolean() ? XContentType.SMILE : XContentType.JSON);
+        ChecksumBlobStoreFormat<BlobObj> checksumFormatComp = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), true, randomBoolean() ? XContentType.SMILE : XContentType.JSON);
         BlobObj blobObj = new BlobObj(veryRedundantText.toString());
         checksumFormatComp.write(blobObj, blobContainer, "blob-comp");
         checksumFormat.write(blobObj, blobContainer, "blob-not-comp");
@@ -154,7 +153,8 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
         BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath());
         String testString = randomAsciiOfLength(randomInt(10000));
         BlobObj blobObj = new BlobObj(testString);
-        ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON);
+        ChecksumBlobStoreFormat<BlobObj> checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON);
         checksumFormat.write(blobObj, blobContainer, "test-path");
         assertEquals(checksumFormat.read(blobContainer, "test-path").getText(), testString);
         randomCorruption(blobContainer, "test-path");
@@ -188,7 +188,8 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
                 return builder;
             }
         };
-        final ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON);
+        final ChecksumBlobStoreFormat<BlobObj> checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent,
+            xContentRegistry(), randomBoolean(), randomBoolean() ? XContentType.SMILE : XContentType.JSON);
         ExecutorService threadPool = Executors.newFixedThreadPool(1);
         try {
             Future<Void> future = threadPool.submit(new Callable<Void>() {
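The ChecksumBlobStoreFormat constructor now takes a reader function plus an xContent registry instead of a prototype and a ParseFieldMatcher. A sketch of the resulting usage, assuming a BlobObj class shaped like the one in this test and a caller-supplied container and registry:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat;

public final class BlobFormatExample {

    // Writes a BlobObj and reads it back; BlobObj::fromXContent is the static
    // parser the class exposes after this change.
    public static BlobObj writeAndRead(BlobContainer container,
                                       NamedXContentRegistry registry) throws IOException {
        ChecksumBlobStoreFormat<BlobObj> format = new ChecksumBlobStoreFormat<>(
            "blob", "%s", BlobObj::fromXContent, registry, false, XContentType.JSON);
        format.write(new BlobObj("payload"), container, "my-blob");
        return format.read(container, "my-blob");
    }
}
--------------------------------------------------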
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 1d0ddf39b01..5769a8e89f3 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -35,15 +35,24 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.discovery.zen.ElectMasterService;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.indices.recovery.RecoveryState;
@@ -62,6 +71,7 @@ import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.TestCustomMetaData;
 import org.elasticsearch.test.rest.FakeRestRequest;
 
+import java.io.IOException;
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -88,9 +98,51 @@ import static org.hamcrest.Matchers.nullValue;
 @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
 public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
 
+    public static class TestCustomMetaDataPlugin extends Plugin {
+
+        private final List<NamedWriteableRegistry.Entry> namedWritables = new ArrayList<>();
+        private final List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();
+
+        public TestCustomMetaDataPlugin() {
+            registerBuiltinWritables();
+        }
+
+        private void registerMetaDataCustom(String name, Writeable.Reader reader,
+                                            Writeable.Reader diffReader,
+                                            NamedXContentRegistry.FromXContent parser) {
+            namedWritables.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, name, reader));
+            namedWritables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, name, diffReader));
+            namedXContents.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(name), parser));
+        }
+
+        private void registerBuiltinWritables() {
+            registerMetaDataCustom(SnapshottableMetadata.TYPE, SnapshottableMetadata::readFrom,
+                SnapshottableMetadata::readDiffFrom, SnapshottableMetadata::fromXContent);
+            registerMetaDataCustom(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata::readFrom,
+                NonSnapshottableMetadata::readDiffFrom, NonSnapshottableMetadata::fromXContent);
+            registerMetaDataCustom(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata::readFrom,
+                SnapshottableGatewayMetadata::readDiffFrom, SnapshottableGatewayMetadata::fromXContent);
+            registerMetaDataCustom(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata::readFrom,
+                NonSnapshottableGatewayMetadata::readDiffFrom, NonSnapshottableGatewayMetadata::fromXContent);
+            registerMetaDataCustom(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata::readFrom,
+                NonSnapshottableGatewayMetadata::readDiffFrom, SnapshotableGatewayNoApiMetadata::fromXContent);
+
+        }
+
+        @Override
+        public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+            return namedWritables;
+        }
+
+        @Override
+        public List<NamedXContentRegistry.Entry> getNamedXContent() {
+            return namedXContents;
+        }
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(MockRepository.Plugin.class);
+        return Arrays.asList(MockRepository.Plugin.class, TestCustomMetaDataPlugin.class);
     }
 
     public void testRestorePersistentSettings() throws Exception {
@@ -780,33 +832,31 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     }
 
-    static {
-        MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO);
-        MetaData.registerPrototype(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.PROTO);
-        MetaData.registerPrototype(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.PROTO);
-        MetaData.registerPrototype(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.PROTO);
-        MetaData.registerPrototype(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.PROTO);
-    }
-
     public static class SnapshottableMetadata extends TestCustomMetaData {
         public static final String TYPE = "test_snapshottable";
 
-        public static final SnapshottableMetadata PROTO = new SnapshottableMetadata("");
-
         public SnapshottableMetadata(String data) {
             super(data);
         }
 
         @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
-        @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new SnapshottableMetadata(data);
+        public static SnapshottableMetadata readFrom(StreamInput in) throws IOException {
+            return readFrom(SnapshottableMetadata::new, in);
         }
 
+        public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TYPE, in);
+        }
+
+        public static SnapshottableMetadata fromXContent(XContentParser parser) throws IOException {
+            return fromXContent(SnapshottableMetadata::new, parser);
+        }
+
+        @Override
         public EnumSet<MetaData.XContentContext> context() {
             return MetaData.API_AND_SNAPSHOT;
@@ -816,20 +866,25 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     public static class NonSnapshottableMetadata extends TestCustomMetaData {
         public static final String TYPE = "test_non_snapshottable";
 
-        public static final NonSnapshottableMetadata PROTO = new NonSnapshottableMetadata("");
-
         public NonSnapshottableMetadata(String data) {
             super(data);
         }
 
         @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
-        @Override
-        protected NonSnapshottableMetadata newTestCustomMetaData(String data) {
-            return new NonSnapshottableMetadata(data);
+        public static NonSnapshottableMetadata readFrom(StreamInput in) throws IOException {
+            return readFrom(NonSnapshottableMetadata::new, in);
+        }
+
+        public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TYPE, in);
+        }
+
+        public static NonSnapshottableMetadata fromXContent(XContentParser parser) throws IOException {
+            return fromXContent(NonSnapshottableMetadata::new, parser);
         }
 
         @Override
@@ -841,20 +896,25 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     public static class SnapshottableGatewayMetadata extends TestCustomMetaData {
         public static final String TYPE = "test_snapshottable_gateway";
 
-        public static final SnapshottableGatewayMetadata PROTO = new SnapshottableGatewayMetadata("");
-
         public SnapshottableGatewayMetadata(String data) {
             super(data);
         }
 
         @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
-        @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new SnapshottableGatewayMetadata(data);
+        public static SnapshottableGatewayMetadata readFrom(StreamInput in) throws IOException {
+            return readFrom(SnapshottableGatewayMetadata::new, in);
+        }
+
+        public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TYPE, in);
+        }
+
+        public static SnapshottableGatewayMetadata fromXContent(XContentParser parser) throws IOException {
+            return fromXContent(SnapshottableGatewayMetadata::new, parser);
         }
 
         @Override
@@ -866,20 +926,25 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     public static class NonSnapshottableGatewayMetadata extends TestCustomMetaData {
         public static final String TYPE = "test_non_snapshottable_gateway";
 
-        public static final NonSnapshottableGatewayMetadata PROTO = new NonSnapshottableGatewayMetadata("");
-
         public NonSnapshottableGatewayMetadata(String data) {
             super(data);
         }
 
         @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
-        @Override
-        protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) {
-            return new NonSnapshottableGatewayMetadata(data);
+        public static NonSnapshottableGatewayMetadata readFrom(StreamInput in) throws IOException {
+            return readFrom(NonSnapshottableGatewayMetadata::new, in);
+        }
+
+        public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TYPE, in);
+        }
+
+        public static NonSnapshottableGatewayMetadata fromXContent(XContentParser parser) throws IOException {
+            return fromXContent(NonSnapshottableGatewayMetadata::new, parser);
         }
 
         @Override
@@ -892,20 +957,21 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     public static class SnapshotableGatewayNoApiMetadata extends TestCustomMetaData {
         public static final String TYPE = "test_snapshottable_gateway_no_api";
 
-        public static final SnapshotableGatewayNoApiMetadata PROTO = new SnapshotableGatewayNoApiMetadata("");
-
         public SnapshotableGatewayNoApiMetadata(String data) {
             super(data);
         }
 
         @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
-        @Override
-        protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) {
-            return new SnapshotableGatewayNoApiMetadata(data);
+        public static SnapshotableGatewayNoApiMetadata readFrom(StreamInput in) throws IOException {
+            return readFrom(SnapshotableGatewayNoApiMetadata::new, in);
+        }
+
+        public static SnapshotableGatewayNoApiMetadata fromXContent(XContentParser parser) throws IOException {
+            return fromXContent(SnapshotableGatewayNoApiMetadata::new, parser);
         }
 
         @Override
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
index 7fb717cd7c2..a8c37fc3d71 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
@@ -46,6 +46,7 @@ import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.plugins.RepositoryPlugin;
 import org.elasticsearch.repositories.Repository;
@@ -63,8 +64,8 @@ public class MockRepository extends FsRepository {
 
         @Override
-        public Map<String, Repository.Factory> getRepositories(Environment env) {
-            return Collections.singletonMap("mock", (metadata) -> new MockRepository(metadata, env));
+        public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
+            return Collections.singletonMap("mock", (metadata) -> new MockRepository(metadata, env, namedXContentRegistry));
         }
 
         @Override
@@ -101,8 +102,9 @@ public class MockRepository extends FsRepository {
 
     private volatile boolean blocked = false;
 
-    public MockRepository(RepositoryMetaData metadata, Environment environment) throws IOException {
-        super(overrideSettings(metadata, environment), environment);
+    public MockRepository(RepositoryMetaData metadata, Environment environment,
+                          NamedXContentRegistry namedXContentRegistry) throws IOException {
+        super(overrideSettings(metadata, environment), environment, namedXContentRegistry);
         randomControlIOExceptionRate = metadata.settings().getAsDouble("random_control_io_exception_rate", 0.0);
         randomDataFileIOExceptionRate = metadata.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
         useLuceneCorruptionException = metadata.settings().getAsBoolean("use_lucene_corruption", false);
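The plugin above is the general registration recipe that replaces the old static MetaData.registerPrototype calls. A condensed sketch, where MyCustom is a hypothetical MetaData.Custom implementation exposing the usual static readFrom/readDiffFrom/fromXContent methods:

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.plugins.Plugin;

public class MyCustomMetaDataPlugin extends Plugin {

    @Override
    public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
        // one entry for the custom itself, one for its named diff
        return Arrays.asList(
            new NamedWriteableRegistry.Entry(MetaData.Custom.class, MyCustom.TYPE, MyCustom::readFrom),
            new NamedWriteableRegistry.Entry(NamedDiff.class, MyCustom.TYPE, MyCustom::readDiffFrom));
    }

    @Override
    public List<NamedXContentRegistry.Entry> getNamedXContent() {
        // lets the custom be parsed back from on-disk or snapshot xContent
        return Collections.singletonList(
            new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(MyCustom.TYPE), MyCustom::fromXContent));
    }
}
--------------------------------------------------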
diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java
index 2153390d610..a63d16a9364 100644
--- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java
+++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java
@@ -24,8 +24,8 @@ import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
-import org.elasticsearch.cluster.LocalClusterUpdateTask;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -33,6 +33,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
@@ -53,6 +55,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -117,9 +120,37 @@ public class TribeIT extends ESIntegTestCase {
                 .build();
     }
 
+    public static class TestCustomMetaDataPlugin extends Plugin {
+
+        private final List<NamedWriteableRegistry.Entry> namedWritables = new ArrayList<>();
+
+        public TestCustomMetaDataPlugin() {
+            registerBuiltinWritables();
+        }
+
+        private void registerMetaDataCustom(String name, Writeable.Reader reader,
+                                            Writeable.Reader diffReader) {
+            namedWritables.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, name, reader));
+            namedWritables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, name, diffReader));
+        }
+
+        private void registerBuiltinWritables() {
+            registerMetaDataCustom(MergableCustomMetaData1.TYPE, MergableCustomMetaData1::readFrom, MergableCustomMetaData1::readDiffFrom);
+            registerMetaDataCustom(MergableCustomMetaData2.TYPE, MergableCustomMetaData2::readFrom, MergableCustomMetaData2::readDiffFrom);
+        }
+
+        @Override
+        public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+            return namedWritables;
+        }
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return getMockPlugins();
+        ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>();
+        plugins.addAll(getMockPlugins());
+        plugins.add(TestCustomMetaDataPlugin.class);
+        return plugins;
     }
 
     @Before
@@ -456,7 +487,6 @@ public class TribeIT extends ESIntegTestCase {
     }
 
     public void testMergingRemovedCustomMetaData() throws Exception {
-        MetaData.registerPrototype(MergableCustomMetaData1.TYPE, new MergableCustomMetaData1(""));
        removeCustomMetaData(cluster1, MergableCustomMetaData1.TYPE);
        removeCustomMetaData(cluster2, MergableCustomMetaData1.TYPE);
        MergableCustomMetaData1 customMetaData1 = new MergableCustomMetaData1("a");
@@ -466,13 +496,12 @@ public class TribeIT extends ESIntegTestCase {
             putCustomMetaData(cluster1, customMetaData1);
             putCustomMetaData(cluster2, customMetaData2);
             assertCustomMetaDataUpdated(internalCluster(), customMetaData2);
-            removeCustomMetaData(cluster2, customMetaData2.type());
+            removeCustomMetaData(cluster2, customMetaData2.getWriteableName());
             assertCustomMetaDataUpdated(internalCluster(), customMetaData1);
         }
     }
 
     public void testMergingCustomMetaData() throws Exception {
-        MetaData.registerPrototype(MergableCustomMetaData1.TYPE, new MergableCustomMetaData1(""));
         removeCustomMetaData(cluster1, MergableCustomMetaData1.TYPE);
         removeCustomMetaData(cluster2, MergableCustomMetaData1.TYPE);
         MergableCustomMetaData1 customMetaData1 = new MergableCustomMetaData1(randomAsciiOfLength(10));
@@ -490,8 +519,6 @@ public class TribeIT extends ESIntegTestCase {
     }
 
     public void testMergingMultipleCustomMetaData() throws Exception {
-        MetaData.registerPrototype(MergableCustomMetaData1.TYPE, new MergableCustomMetaData1(""));
-        MetaData.registerPrototype(MergableCustomMetaData2.TYPE, new MergableCustomMetaData2(""));
         removeCustomMetaData(cluster1, MergableCustomMetaData1.TYPE);
         removeCustomMetaData(cluster2, MergableCustomMetaData1.TYPE);
         MergableCustomMetaData1 firstCustomMetaDataType1 = new MergableCustomMetaData1(randomAsciiOfLength(10));
@@ -521,10 +548,10 @@ public class TribeIT extends ESIntegTestCase {
             assertCustomMetaDataUpdated(internalCluster(), mergedCustomMetaDataType2.get(0));
 
             // test removing custom md is propagates to tribe
-            removeCustomMetaData(cluster2, secondCustomMetaDataType1.type());
+            removeCustomMetaData(cluster2, secondCustomMetaDataType1.getWriteableName());
             assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType1);
             assertCustomMetaDataUpdated(internalCluster(), mergedCustomMetaDataType2.get(0));
-            removeCustomMetaData(cluster2, secondCustomMetaDataType2.type());
+            removeCustomMetaData(cluster2, secondCustomMetaDataType2.getWriteableName());
             assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType1);
             assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType2);
         }
@@ -534,7 +561,7 @@ public class TribeIT extends ESIntegTestCase {
                                                     TestCustomMetaData expectedCustomMetaData) throws Exception {
         assertBusy(() -> {
             ClusterState tribeState = cluster.getInstance(ClusterService.class, cluster.getNodeNames()[0]).state();
-            MetaData.Custom custom = tribeState.metaData().custom(expectedCustomMetaData.type());
+            MetaData.Custom custom = tribeState.metaData().custom(expectedCustomMetaData.getWriteableName());
             assertNotNull(custom);
             assertThat(custom, equalTo(expectedCustomMetaData));
         });
@@ -546,9 +573,9 @@ public class TribeIT extends ESIntegTestCase {
     }
 
     private void putCustomMetaData(InternalTestCluster cluster, final TestCustomMetaData customMetaData) {
-        logger.info("putting custom_md type [{}] with data[{}] from [{}]", customMetaData.type(),
+        logger.info("putting custom_md type [{}] with data[{}] from [{}]", customMetaData.getWriteableName(),
                 customMetaData.getData(), cluster.getClusterName());
-        updateMetaData(cluster, builder -> builder.putCustom(customMetaData.type(), customMetaData));
+        updateMetaData(cluster, builder -> builder.putCustom(customMetaData.getWriteableName(), customMetaData));
     }
 
     private static void updateMetaData(InternalTestCluster cluster, UnaryOperator<MetaData.Builder> addCustoms) {
diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
index 7aea02c552b..f41e2496ce2 100644
--- a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
@@ -19,13 +19,16 @@
 
 package org.elasticsearch.tribe;
 
+import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.TestCustomMetaData;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -185,13 +188,16 @@ public class TribeServiceTests extends ESTestCase {
         }
 
         @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new MergableCustomMetaData1(data);
+        public String getWriteableName() {
+            return TYPE;
         }
 
-        @Override
-        public String type() {
-            return TYPE;
+        public static MergableCustomMetaData1 readFrom(StreamInput in) throws IOException {
+            return readFrom(MergableCustomMetaData1::new, in);
+        }
+
+        public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TYPE, in);
         }
 
         @Override
@@ -214,15 +220,19 @@ public class TribeServiceTests extends ESTestCase {
         }
 
         @Override
-        protected TestCustomMetaData newTestCustomMetaData(String data) {
-            return new MergableCustomMetaData2(data);
-        }
-
-        @Override
-        public String type() {
+        public String getWriteableName() {
             return TYPE;
         }
 
+        public static MergableCustomMetaData2 readFrom(StreamInput in) throws IOException {
+            return readFrom(MergableCustomMetaData2::new, in);
+        }
+
+        public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+            return readDiffFrom(TYPE, in);
+        }
+
+        @Override
         public EnumSet<MetaData.XContentContext> context() {
             return EnumSet.of(MetaData.XContentContext.GATEWAY);
diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
index 9735b83d9e6..1661c69c725 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
@@ -31,6 +31,7 @@ import org.elasticsearch.cloud.azure.classic.management.AzureComputeService;
 import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.network.NetworkService;
@@ -74,10 +75,11 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
 
     @Override
     public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
+                                                              NamedWriteableRegistry namedWriteableRegistry,
                                                               ClusterService clusterService, UnicastHostsProvider hostsProvider) {
         // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider
         return Collections.singletonMap(AZURE, () ->
-            new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
+            new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider));
     }
 
     @Override
a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java @@ -45,6 +45,7 @@ import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; import org.elasticsearch.cloud.aws.network.Ec2NameResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; @@ -101,10 +102,11 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close @Override public Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, UnicastHostsProvider hostsProvider) { // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider return Collections.singletonMap(EC2, () -> - new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider)); + new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider)); } @Override diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index f53abc4241c..41ba2f76a9b 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -33,6 +33,7 @@ import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; @@ -99,10 +100,11 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close @Override public Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, UnicastHostsProvider hostsProvider) { // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider return Collections.singletonMap(GCE, () -> - new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider)); + new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider)); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index 5b938fce188..f8953833074 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -23,6 +23,7 @@ import 
org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -45,9 +46,9 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin { } @Override - public Map getRepositories(Environment env) { + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(AzureRepository.TYPE, - (metadata) -> new AzureRepository(metadata, env, createStorageService(env.settings()))); + (metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, createStorageService(env.settings()))); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 2ae5fbe8493..1033578d52d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -32,6 +32,7 @@ import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.common.Strings; @@ -84,9 +85,10 @@ public class AzureRepository extends BlobStoreRepository { private final boolean compress; private final boolean readonly; - public AzureRepository(RepositoryMetaData metadata, Environment environment, AzureStorageService storageService) + public AzureRepository(RepositoryMetaData metadata, Environment environment, + NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService) throws IOException, URISyntaxException, StorageException { - super(metadata, environment.settings()); + super(metadata, environment.settings(), namedXContentRegistry); blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); String container = getValue(metadata.settings(), settings, Repository.CONTAINER_SETTING, Storage.CONTAINER_SETTING); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 68b3783b03a..99b79fc3b32 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import 
org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.test.ESTestCase; @@ -47,7 +48,8 @@ public class AzureRepositorySettingsTests extends ESTestCase { .putArray(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) .put(settings) .build(); - return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), new Environment(internalSettings), null); + return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), new Environment(internalSettings), + NamedXContentRegistry.EMPTY, null); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java index c0fa38e8b57..3d28922327e 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/plugin/repository/gcs/GoogleCloudStoragePlugin.java @@ -42,6 +42,7 @@ import com.google.api.services.storage.model.StorageObject; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -120,8 +121,8 @@ public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin } @Override - public Map getRepositories(Environment env) { + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, - (metadata) -> new GoogleCloudStorageRepository(metadata, env, createStorageService(env))); + (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, createStorageService(env))); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index f7b74d5a4f8..c0a82f8266a 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin; import org.elasticsearch.repositories.RepositoryException; @@ -72,8 +73,9 @@ public class GoogleCloudStorageRepository extends BlobStoreRepository { private final GoogleCloudStorageBlobStore blobStore; public GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, + NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService) throws Exception { - super(metadata, environment.settings()); + super(metadata, environment.settings(), namedXContentRegistry); String bucket = get(BUCKET, metadata); String application = get(APPLICATION_NAME, metadata); diff --git 
a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java index d4af26c3bbc..13da44b45f1 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java @@ -28,6 +28,7 @@ import java.util.Map; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -88,7 +89,7 @@ public final class HdfsPlugin extends Plugin implements RepositoryPlugin { } @Override - public Map getRepositories(Environment env) { - return Collections.singletonMap("hdfs", (metadata) -> new HdfsRepository(metadata, env)); + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + return Collections.singletonMap("hdfs", (metadata) -> new HdfsRepository(metadata, env, namedXContentRegistry)); } } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index b111a5d0d0a..f1e5f83fe60 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -60,8 +61,9 @@ public final class HdfsRepository extends BlobStoreRepository { // TODO: why 100KB? 
private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(100, ByteSizeUnit.KB); - public HdfsRepository(RepositoryMetaData metadata, Environment environment) throws IOException { - super(metadata, environment.settings()); + public HdfsRepository(RepositoryMetaData metadata, Environment environment, + NamedXContentRegistry namedXContentRegistry) throws IOException { + super(metadata, environment.settings(), namedXContentRegistry); this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); this.compress = metadata.settings().getAsBoolean("compress", false); @@ -88,7 +90,7 @@ public final class HdfsRepository extends BlobStoreRepository { if (pathSetting == null) { throw new IllegalArgumentException("No 'path' defined for hdfs snapshot/restore"); } - + int bufferSize = getMetadata().settings().getAsBytesSize("buffer_size", DEFAULT_BUFFER_SIZE).bytesAsInt(); try { @@ -110,7 +112,7 @@ public final class HdfsRepository extends BlobStoreRepository { } super.doStart(); } - + // create hadoop filecontext @SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)") private static FileContext createContext(URI uri, Settings repositorySettings) { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java index 214d057d2f6..449c9ce411e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java @@ -32,6 +32,7 @@ import org.elasticsearch.cloud.aws.AwsS3Service; import org.elasticsearch.cloud.aws.InternalAwsS3Service; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -71,9 +72,9 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { } @Override - public Map getRepositories(Environment env) { + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(S3Repository.TYPE, - (metadata) -> new S3Repository(metadata, env.settings(), createStorageService(env.settings()))); + (metadata) -> new S3Repository(metadata, env.settings(), namedXContentRegistry, createStorageService(env.settings()))); } @Override diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 7310b527158..1330fa17f80 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -273,8 +274,9 @@ 
public class S3Repository extends BlobStoreRepository { /** * Constructs an s3 backed repository */ - public S3Repository(RepositoryMetaData metadata, Settings settings, AwsS3Service s3Service) throws IOException { - super(metadata, settings); + public S3Repository(RepositoryMetaData metadata, Settings settings, + NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException { + super(metadata, settings, namedXContentRegistry); String bucket = getValue(metadata.settings(), settings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING); if (bucket == null) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 7e37cc1069d..86c27b2e668 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -94,7 +95,7 @@ public class S3RepositoryTests extends ESTestCase { RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() .put(Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB)) .put(Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build()); - new S3Repository(metadata, Settings.EMPTY, new DummyS3Service()); + new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()); } private void assertInvalidBuffer(int bufferMB, int chunkMB, Class clazz, String msg) throws IOException { @@ -102,20 +103,21 @@ public class S3RepositoryTests extends ESTestCase { .put(Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB)) .put(Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build()); - Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, new DummyS3Service())); + Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, + new DummyS3Service())); assertThat(e.getMessage(), containsString(msg)); } public void testBasePathSetting() throws IOException { RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() .put(Repository.BASE_PATH_SETTING.getKey(), "/foo/bar").build()); - S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, new DummyS3Service()); + S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()); assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added assertWarnings("S3 repository base_path" + " trimming the leading `/`, and leading `/` will not be supported for the S3 repository in future releases"); metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); Settings settings = Settings.builder().put(Repositories.BASE_PATH_SETTING.getKey(), "/foo/bar").build(); - s3repo = new 
S3Repository(metadata, settings, new DummyS3Service()); + s3repo = new S3Repository(metadata, settings, NamedXContentRegistry.EMPTY, new DummyS3Service()); assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added assertWarnings("S3 repository base_path" + " trimming the leading `/`, and leading `/` will not be supported for the S3 repository in future releases"); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index b7b825da472..125566d385a 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -36,6 +36,11 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa return true; } + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml index a2b40cc54f7..c836ba73fa0 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yaml @@ -35,3 +35,15 @@ index: test_index - match: { hits.total: 10 } # 5 docs from old cluster, 5 docs from mixed cluster + +--- +"Verify custom cluster metadata still exists during upgrade": + - do: + snapshot.get_repository: + repository: my_repo + - is_true: my_repo + + - do: + ingest.get_pipeline: + id: "my_pipeline" + - match: { my_pipeline.description: "_description" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml index f1f90cf9d22..98627e03419 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yaml @@ -32,3 +32,25 @@ index: test_index - match: { hits.total: 5 } + +--- +"Add stuff to cluster state so that we can verify that it still exists during and after the rolling upgrade": + - do: + snapshot.create_repository: + repository: my_repo + body: + type: url + settings: + url: "http://snapshot.test" + - match: { "acknowledged": true } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + ] + } + - match: { "acknowledged": true } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml index 03dcdc583d3..dc1f9fc1bbf 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yaml @@ -35,3 +35,15 @@ index: test_index - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs + +--- +"Verify custom cluster metadata still exists after rolling upgrade": + - do: + snapshot.get_repository: + repository: my_repo
- is_true: my_repo + + - do: + ingest.get_pipeline: + id: "my_pipeline" + - match: { my_pipeline.description: "_description" } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index fd3af224a1f..038477cc3aa 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -61,6 +61,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -78,6 +79,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; @@ -1085,10 +1087,18 @@ public abstract class ESIntegTestCase extends ESTestCase { */ protected void ensureClusterStateConsistency() throws IOException { if (cluster() != null && cluster().size() > 0) { + final NamedWriteableRegistry namedWriteableRegistry; + if (isInternalCluster()) { + // If it's internal cluster - using existing registry in case plugin registered custom data + namedWriteableRegistry = internalCluster().getInstance(NamedWriteableRegistry.class); + } else { + // If it's external cluster - fall back to the standard set + namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + } ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); // remove local node reference - masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); + masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry); Map masterStateMap = convertToMap(masterClusterState); int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; String masterId = masterClusterState.nodes().getMasterNodeId(); @@ -1096,7 +1106,7 @@ public abstract class ESIntegTestCase extends ESTestCase { ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); // remove local node reference - localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); + localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null, namedWriteableRegistry); final Map localStateMap = convertToMap(localClusterState); final int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; // Check that the non-master node has the same version of the cluster state as the master and @@ -2117,7 +2127,13 @@ public abstract class ESIntegTestCase extends ESTestCase { @Override protected NamedXContentRegistry xContentRegistry() { - return internalCluster().getInstance(NamedXContentRegistry.class); + if (isInternalCluster() 
&& cluster().size() > 0) { + // If it's internal cluster - using existing registry in case plugin registered custom data + return internalCluster().getInstance(NamedXContentRegistry.class); + } else { + // If it's external cluster - fall back to the standard set + return new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); + } } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index ca4f846fe39..359fe1e7968 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -46,7 +46,9 @@ import org.apache.lucene.util.TimeUnits; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtilsForTesting; @@ -978,7 +980,7 @@ public abstract class ESTestCase extends LuceneTestCase { * The {@link NamedXContentRegistry} to use for this test. Subclasses should override and use liberally. */ protected NamedXContentRegistry xContentRegistry() { - return NamedXContentRegistry.EMPTY; + return new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); } /** Returns the suite failure marker: internal use only! */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java index 7f43c9de61b..81d8fa84a19 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.IndexFolderUpgrader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.engine.Segment; @@ -131,7 +132,8 @@ public class OldIndexUtils { } } assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); - final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolders.get(0)); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, + indexFolders.get(0)); assertNotNull(indexMetaData); assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); assertThat(indexMetaData.getCreationVersion(), equalTo(version)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java b/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java index a655f17faca..fb709a7fd06 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java @@ -21,15 +21,21 @@ package org.elasticsearch.test; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import 
org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.function.Function; +import java.util.function.Supplier; -public abstract class TestCustomMetaData extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom { +public abstract class TestCustomMetaData extends AbstractNamedDiffable<MetaData.Custom> implements MetaData.Custom { private final String data; protected TestCustomMetaData(String data) { @@ -57,11 +63,12 @@ public abstract class TestCustomMetaData extends AbstractDiffable + public static <T extends TestCustomMetaData> T readFrom(Function<String, T> supplier, StreamInput in) throws IOException { + return supplier.apply(in.readString()); + } - @Override - public MetaData.Custom readFrom(StreamInput in) throws IOException { - return newTestCustomMetaData(in.readString()); + public static NamedDiff readDiffFrom(String name, StreamInput in) throws IOException { + return readDiffFrom(MetaData.Custom.class, name, in); } @Override @@ -69,8 +76,9 @@ public abstract class TestCustomMetaData extends AbstractDiffable + public static <T extends TestCustomMetaData> T fromXContent(Function<String, T> supplier, XContentParser parser) + throws IOException { XContentParser.Token token; String data = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -91,7 +99,7 @@ public abstract class TestCustomMetaData extends AbstractDiffable diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, UnicastHostsProvider hostsProvider) { return Collections.singletonMap("test-zen", - () -> new TestZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider)); + () -> new TestZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider)); } @Override @@ -71,8 +73,9 @@ public class TestZenDiscovery extends ZenDiscovery { } private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, UnicastHostsProvider hostsProvider) { - super(settings, threadPool, transportService, clusterService, hostsProvider); + NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, + UnicastHostsProvider hostsProvider) { + super(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 975e6e2f866..1fc4cec9f80 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -174,6 +174,13 @@ public abstract class ESRestTestCase extends ESTestCase { return false; } + /** + * Returns whether to preserve the repositories on completion of this test.
+ */ + protected boolean preserveReposUponCompletion() { + return false; + } + private void wipeCluster() throws IOException { if (preserveIndicesUponCompletion() == false) { // wipe indices @@ -217,8 +224,10 @@ public abstract class ESRestTestCase extends ESTestCase { adminClient().performRequest("DELETE", "_snapshot/" + repoName + "/" + name); } } - logger.debug("wiping snapshot repository [{}]", repoName); - adminClient().performRequest("DELETE", "_snapshot/" + repoName); + if (preserveReposUponCompletion() == false) { + logger.debug("wiping snapshot repository [{}]", repoName); + adminClient().performRequest("DELETE", "_snapshot/" + repoName); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index a35a1919cb7..a0344a6f86d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.transport; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.Lifecycle; @@ -93,7 +94,7 @@ public final class MockTransportService extends TransportService { public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings) { - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); final Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version); return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, clusterSettings); diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java index 61f7722a7c2..5f7e38b7ec1 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.search.Queries; @@ -32,10 +33,16 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; public class MockSearchServiceTests extends ESTestCase { + public static final IndexMetaData EMPTY_INDEX_METADATA = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + public void testAssertNoInFlightContext() { final long nowInMillis = randomNonNegativeLong(); - SearchContext s = new TestSearchContext(new QueryShardContext(0, new IndexSettings(IndexMetaData.PROTO, Settings.EMPTY), null, null, - null, null, null, xContentRegistry(), null, null, () -> 
nowInMillis)) { + SearchContext s = new TestSearchContext(new QueryShardContext(0, + new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY), null, null, null, null, null, xContentRegistry(), + null, null, () -> nowInMillis)) { + + @Override public SearchShardTarget shardTarget() { return new SearchShardTarget("node", new Index("idx", "ignored"), 0); From 6e6d9eb25514f8e533ba7973f69a2aaf09de5a88 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 29 Dec 2016 10:58:15 +0100 Subject: [PATCH 034/119] Use a fresh recovery id when retrying recoveries (#22325) Recoveries are tracked on the target node using RecoveryTarget objects that are kept in a RecoveriesCollection. Each recovery has a unique id that is communicated from the recovery target to the source so that it can call back to the target and execute actions using the right recovery context. In case of a network disconnect, recoveries are retried. At the moment, the same recovery id is reused for the restarted recovery. This can lead to confusion, however, if the disconnect is unilateral and the recovery source continues with the recovery process: if the target reuses the same recovery id for its second attempt, there might be two concurrent recoveries running on the source for the same target. This commit changes the recovery retry process to use a fresh recovery id. It also waits for the first recovery attempt to be fully finished (all resources locally freed) to further prevent concurrent access to the shard. Finally, in case of primary relocation, it also fails a second recovery attempt if the first attempt moved past the finalization step, as the relocation source can then be moved to RELOCATED state and start indexing as primary into the target shard (see TransportReplicationAction). Resetting the target shard in this state could mean that indexing is halted until the recovery retry attempt is completed, and could also destroy existing documents indexed and acknowledged before the reset.
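To make the retry scheme concrete, here is a minimal, self-contained sketch of the idea described above. This is not the actual `RecoveriesCollection` code; the names below (`RecoveryIdSketch`, `startRecovery`, `resetRecovery`) are illustrative only. The point is that a retry removes the old entry and registers the new attempt under a fresh id, so a source node that still holds the stale id can no longer address the restarted recovery:

[source,java]
--------------------------------------------------
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class RecoveryIdSketch {

    // monotonically increasing ids, one per recovery attempt
    private static final AtomicLong ID_GENERATOR = new AtomicLong();

    // stand-in for the collection of ongoing recoveries: id -> shard description
    private final Map<Long, String> onGoingRecoveries = new ConcurrentHashMap<>();

    /** Registers a new recovery attempt and returns its unique id. */
    public long startRecovery(String shardDescription) {
        long id = ID_GENERATOR.incrementAndGet();
        onGoingRecoveries.put(id, shardDescription);
        return id;
    }

    /**
     * Resets a recovery for a retry: the old entry is removed (this is where
     * the old attempt's resources would be freed) and the retry is registered
     * under a brand-new id, so late callbacks for the old id find nothing.
     */
    public long resetRecovery(long oldId) {
        String shard = onGoingRecoveries.remove(oldId);
        if (shard == null) {
            throw new IllegalStateException("recovery [" + oldId + "] already finished");
        }
        return startRecovery(shard);
    }

    public static void main(String[] args) {
        RecoveryIdSketch recoveries = new RecoveryIdSketch();
        long first = recoveries.startRecovery("[index][0]");
        long second = recoveries.resetRecovery(first); // network disconnect -> retry
        System.out.println("first attempt id " + first + ", retry id " + second);
    }
}
--------------------------------------------------

With this scheme a stale callback for the old id fails fast with a lookup miss instead of silently operating on a recovery that has been restarted underneath it.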
Relates to #22043 --- .../recovery/PeerRecoveryTargetService.java | 195 ++++++++++-------- .../recovery/RecoveriesCollection.java | 93 ++++++--- .../indices/recovery/RecoverySettings.java | 2 +- .../recovery/RecoverySourceHandler.java | 2 + .../indices/recovery/RecoveryTarget.java | 60 ++++-- .../indices/recovery/IndexRecoveryIT.java | 131 +++++++++++- .../recovery/RecoveriesCollectionTests.java | 54 ++--- 7 files changed, 372 insertions(+), 165 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 11554358b30..894399e851e 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoveriesCollection.RecoveryRef; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; @@ -139,65 +140,85 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde threadPool.generic().execute(new RecoveryRunner(recoveryId)); } - protected void retryRecovery(final RecoveryTarget recoveryTarget, final Throwable reason, TimeValue retryAfter, final - StartRecoveryRequest currentRequest) { + protected void retryRecovery(final long recoveryId, final Throwable reason, TimeValue retryAfter, TimeValue activityTimeout) { logger.trace( (Supplier) () -> new ParameterizedMessage( - "will retry recovery with id [{}] in [{}]", recoveryTarget.recoveryId(), retryAfter), reason); - retryRecovery(recoveryTarget, retryAfter, currentRequest); + "will retry recovery with id [{}] in [{}]", recoveryId, retryAfter), reason); + retryRecovery(recoveryId, retryAfter, activityTimeout); } - protected void retryRecovery(final RecoveryTarget recoveryTarget, final String reason, TimeValue retryAfter, final - StartRecoveryRequest currentRequest) { - logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryTarget.recoveryId(), retryAfter, reason); - retryRecovery(recoveryTarget, retryAfter, currentRequest); + protected void retryRecovery(final long recoveryId, final String reason, TimeValue retryAfter, TimeValue activityTimeout) { + logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryId, retryAfter, reason); + retryRecovery(recoveryId, retryAfter, activityTimeout); } - private void retryRecovery(final RecoveryTarget recoveryTarget, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { - try { - onGoingRecoveries.resetRecovery(recoveryTarget.recoveryId(), recoveryTarget.shardId()); - } catch (Exception e) { - onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(currentRequest, e), true); + private void retryRecovery(final long recoveryId, TimeValue retryAfter, TimeValue activityTimeout) { + RecoveryTarget newTarget = onGoingRecoveries.resetRecovery(recoveryId, activityTimeout); + if (newTarget != null) { + threadPool.schedule(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(newTarget.recoveryId())); } - threadPool.schedule(retryAfter, ThreadPool.Names.GENERIC, new 
RecoveryRunner(recoveryTarget.recoveryId())); } - private void doRecovery(final RecoveryTarget recoveryTarget) { - assert recoveryTarget.sourceNode() != null : "can't do a recovery without a source node"; + private void doRecovery(final long recoveryId) { + final StartRecoveryRequest request; + final CancellableThreads cancellableThreads; + final RecoveryState.Timer timer; - logger.trace("collecting local files for {}", recoveryTarget); - Store.MetadataSnapshot metadataSnapshot = null; - try { - if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) { - // we are not going to copy any files, so don't bother listing files, potentially running - // into concurrency issues with the primary changing files underneath us. - metadataSnapshot = Store.MetadataSnapshot.EMPTY; - } else { - metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata(); + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + if (recoveryRef == null) { + logger.trace("not running recovery with id [{}] - can't find it (probably finished)", recoveryId); + return; } - } catch (org.apache.lucene.index.IndexNotFoundException e) { - // happens on an empty folder. no need to log - metadataSnapshot = Store.MetadataSnapshot.EMPTY; - } catch (IOException e) { - logger.warn("error while listing local files, recover as if there are none", e); - metadataSnapshot = Store.MetadataSnapshot.EMPTY; - } catch (Exception e) { - // this will be logged as warning later on... - logger.trace("unexpected error while listing local files, failing recovery", e); - onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), - new RecoveryFailedException(recoveryTarget.state(), "failed to list local files", e), true); - return; - } - logger.trace("{} local file count: [{}]", recoveryTarget, metadataSnapshot.size()); - final StartRecoveryRequest request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(), - clusterService.localNode(), metadataSnapshot, recoveryTarget.state().getPrimary(), recoveryTarget.recoveryId()); + RecoveryTarget recoveryTarget = recoveryRef.target(); + assert recoveryTarget.sourceNode() != null : "can't do a recovery without a source node"; + + logger.trace("collecting local files for {}", recoveryTarget.sourceNode()); + Store.MetadataSnapshot metadataSnapshot; + try { + if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) { + // we are not going to copy any files, so don't bother listing files, potentially running + // into concurrency issues with the primary changing files underneath us. + metadataSnapshot = Store.MetadataSnapshot.EMPTY; + } else { + metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata(); + } + logger.trace("{} local file count: [{}]", recoveryTarget, metadataSnapshot.size()); + } catch (org.apache.lucene.index.IndexNotFoundException e) { + // happens on an empty folder. no need to log + logger.trace("{} shard folder empty, recover all files", recoveryTarget); + metadataSnapshot = Store.MetadataSnapshot.EMPTY; + } catch (IOException e) { + logger.warn("error while listing local files, recover as if there are none", e); + metadataSnapshot = Store.MetadataSnapshot.EMPTY; + } catch (Exception e) { + // this will be logged as warning later on... 
+ logger.trace("unexpected error while listing local files, failing recovery", e); + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), + new RecoveryFailedException(recoveryTarget.state(), "failed to list local files", e), true); + return; + } + + try { + logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); + recoveryTarget.indexShard().prepareForIndexRecovery(); + + request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(), + clusterService.localNode(), metadataSnapshot, recoveryTarget.state().getPrimary(), recoveryTarget.recoveryId()); + cancellableThreads = recoveryTarget.CancellableThreads(); + timer = recoveryTarget.state().getTimer(); + } catch (Exception e) { + // this will be logged as warning later on... + logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), + new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true); + return; + } + } - final AtomicReference responseHolder = new AtomicReference<>(); try { - logger.trace("[{}][{}] starting recovery from {}", request.shardId().getIndex().getName(), request.shardId().id(), request - .sourceNode()); - recoveryTarget.indexShard().prepareForIndexRecovery(); - recoveryTarget.CancellableThreads().execute(() -> responseHolder.set( + logger.trace("{} starting recovery from {}", request.shardId(), request.sourceNode()); + final AtomicReference responseHolder = new AtomicReference<>(); + cancellableThreads.execute(() -> responseHolder.set( transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request, new FutureTransportResponseHandler() { @Override @@ -207,9 +228,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde }).txGet())); final RecoveryResponse recoveryResponse = responseHolder.get(); assert responseHolder != null; - final TimeValue recoveryTime = new TimeValue(recoveryTarget.state().getTimer().time()); + final TimeValue recoveryTime = new TimeValue(timer.time()); // do this through ongoing recoveries to remove it from the collection - onGoingRecoveries.markRecoveryAsDone(recoveryTarget.recoveryId()); + onGoingRecoveries.markRecoveryAsDone(recoveryId); if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); sb.append('[').append(request.shardId().getIndex().getName()).append(']').append('[').append(request.shardId().id()) @@ -229,7 +250,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde .append("\n"); logger.trace("{}", sb); } else { - logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime); + logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime); } } catch (CancellableThreads.ExecutionCancelledException e) { logger.trace("recovery cancelled", e); @@ -245,8 +266,8 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { // this can also come from the source wrapped in a RemoteTransportException - onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source has canceled the" + - " recovery", cause), false); + onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, + 
"source has canceled the recovery", cause), false); return; } if (cause instanceof RecoveryEngineException) { @@ -262,31 +283,34 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde // here, we would add checks against exception that need to be retried (and not removeAndClean in this case) - if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof - ShardNotFoundException) { + if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || + cause instanceof ShardNotFoundException) { // if the target is not ready yet, retry - retryRecovery(recoveryTarget, "remote shard not ready", recoverySettings.retryDelayStateSync(), request); + retryRecovery(recoveryId, "remote shard not ready", recoverySettings.retryDelayStateSync(), + recoverySettings.activityTimeout()); return; } if (cause instanceof DelayRecoveryException) { - retryRecovery(recoveryTarget, cause, recoverySettings.retryDelayStateSync(), request); + retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), + recoverySettings.activityTimeout()); return; } if (cause instanceof ConnectTransportException) { - logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", recoveryTarget.shardId(), recoverySettings - .retryDelayNetwork(), cause.getMessage()); - retryRecovery(recoveryTarget, cause.getMessage(), recoverySettings.retryDelayNetwork(), request); + logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", request.shardId(), + recoverySettings.retryDelayNetwork(), cause.getMessage()); + retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), + recoverySettings.activityTimeout()); return; } if (cause instanceof AlreadyClosedException) { - onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source shard is " + - "closed", cause), false); + onGoingRecoveries.failRecovery(recoveryId, + new RecoveryFailedException(request, "source shard is closed", cause), false); return; } - onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, e), true); + onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true); } } @@ -300,9 +324,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps(), request.getMaxUnsafeAutoIdTimestamp()); + recoveryRef.target().prepareForTranslogOperations(request.totalTranslogOps(), request.getMaxUnsafeAutoIdTimestamp()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -312,9 +336,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { - recoveryRef.status().finalizeRecovery(request.globalCheckpoint()); + 
recoveryRef.target().finalizeRecovery(request.globalCheckpoint()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -324,9 +348,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(RecoveryWaitForClusterStateRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.status().ensureClusterStateVersion(request.clusterStateVersion()); + recoveryRef.target().ensureClusterStateVersion(request.clusterStateVersion()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -336,10 +360,10 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(final RecoveryTranslogOperationsRequest request, final TransportChannel channel) throws IOException { - try (RecoveriesCollection.RecoveryRef recoveryRef = + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); - final RecoveryTarget recoveryTarget = recoveryRef.status(); + final RecoveryTarget recoveryTarget = recoveryRef.target(); try { recoveryTarget.indexTranslogOperations(request.operations(), request.totalTranslogOps()); channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -443,9 +467,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.status().receiveFileInfo(request.phase1FileNames, request.phase1FileSizes, request.phase1ExistingFileNames, + recoveryRef.target().receiveFileInfo(request.phase1FileNames, request.phase1FileSizes, request.phase1ExistingFileNames, request.phase1ExistingFileSizes, request.totalTranslogOps); channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -456,9 +480,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.status().cleanFiles(request.totalTranslogOps(), request.sourceMetaSnapshot()); + recoveryRef.target().cleanFiles(request.totalTranslogOps(), request.sourceMetaSnapshot()); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } @@ -471,10 +495,10 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + try (RecoveryRef recoveryRef = 
onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - final RecoveryTarget status = recoveryRef.status(); - final RecoveryState.Index indexState = status.state().getIndex(); + final RecoveryTarget recoveryTarget = recoveryRef.target(); + final RecoveryState.Index indexState = recoveryTarget.state().getIndex(); if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) { indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); } @@ -487,11 +511,11 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde bytesSinceLastPause.addAndGet(-bytes); long throttleTimeInNanos = rateLimiter.pause(bytes); indexState.addTargetThrottling(throttleTimeInNanos); - status.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); + recoveryTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); } } - status.writeFileChunk(request.metadata(), request.position(), request.content(), + recoveryTarget.writeFileChunk(request.metadata(), request.position(), request.content(), request.lastChunk(), request.totalTranslogOps() ); } @@ -509,13 +533,13 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void onFailure(Exception e) { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { if (recoveryRef != null) { logger.error( (Supplier) () -> new ParameterizedMessage( "unexpected error during recovery [{}], failing shard", recoveryId), e); onGoingRecoveries.failRecovery(recoveryId, - new RecoveryFailedException(recoveryRef.status().state(), "unexpected error", e), + new RecoveryFailedException(recoveryRef.target().state(), "unexpected error", e), true // be safe ); } else { @@ -528,16 +552,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void doRun() { - RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId); - if (recoveryRef == null) { - logger.trace("not running recovery with id [{}] - can't find it (probably finished)", recoveryId); - return; - } - try { - doRecovery(recoveryRef.status()); - } finally { - recoveryRef.close(); - } + doRecovery(recoveryId); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 65a48b18e22..2ed79a8a996 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -33,7 +33,9 @@ import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; -import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -65,13 +67,18 @@ public class RecoveriesCollection { */ public long startRecovery(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, TimeValue activityTimeout) { - RecoveryTarget status = new RecoveryTarget(indexShard, sourceNode, listener, ensureClusterStateVersionCallback); - RecoveryTarget existingStatus = onGoingRecoveries.putIfAbsent(status.recoveryId(), status); - assert existingStatus == null : 
"found two RecoveryStatus instances with the same id"; - logger.trace("{} started recovery from {}, id [{}]", indexShard.shardId(), sourceNode, status.recoveryId()); + RecoveryTarget recoveryTarget = new RecoveryTarget(indexShard, sourceNode, listener, ensureClusterStateVersionCallback); + startRecoveryInternal(recoveryTarget, activityTimeout); + return recoveryTarget.recoveryId(); + } + + private void startRecoveryInternal(RecoveryTarget recoveryTarget, TimeValue activityTimeout) { + RecoveryTarget existingTarget = onGoingRecoveries.putIfAbsent(recoveryTarget.recoveryId(), recoveryTarget); + assert existingTarget == null : "found two RecoveryStatus instances with the same id"; + logger.trace("{} started recovery from {}, id [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode(), + recoveryTarget.recoveryId()); threadPool.schedule(activityTimeout, ThreadPool.Names.GENERIC, - new RecoveryMonitor(status.recoveryId(), status.lastAccessTime(), activityTimeout)); - return status.recoveryId(); + new RecoveryMonitor(recoveryTarget.recoveryId(), recoveryTarget.lastAccessTime(), activityTimeout)); } @@ -79,22 +86,48 @@ public class RecoveriesCollection { * Resets the recovery and performs a recovery restart on the currently recovering index shard * * @see IndexShard#performRecoveryRestart() + * @return newly created RecoveryTarget */ - public void resetRecovery(long id, ShardId shardId) throws IOException { - try (RecoveryRef ref = getRecoverySafe(id, shardId)) { - // instead of adding complicated state to RecoveryTarget we just flip the - // target instance when we reset a recovery, that way we have only one cleanup - // path on the RecoveryTarget and are always within the bounds of ref-counting - // which is important since we verify files are on disk etc. after we have written them etc. 
- RecoveryTarget status = ref.status(); - RecoveryTarget resetRecovery = status.resetRecovery(); - if (onGoingRecoveries.replace(id, status, resetRecovery) == false) { - resetRecovery.cancel("replace failed"); // this is important otherwise we leak a reference to the store - throw new IllegalStateException("failed to replace recovery target"); + public RecoveryTarget resetRecovery(final long recoveryId, TimeValue activityTimeout) { + RecoveryTarget oldRecoveryTarget = null; + final RecoveryTarget newRecoveryTarget; + + try { + synchronized (onGoingRecoveries) { + // swap recovery targets in a synchronized block to ensure that the newly added recovery target is picked up by + // cancelRecoveriesForShard whenever the old recovery target is picked up + oldRecoveryTarget = onGoingRecoveries.remove(recoveryId); + if (oldRecoveryTarget == null) { + return null; + } + + newRecoveryTarget = oldRecoveryTarget.retryCopy(); + startRecoveryInternal(newRecoveryTarget, activityTimeout); } + + // Closes the current recovery target + final RecoveryTarget finalOldRecoveryTarget = oldRecoveryTarget; + final AtomicBoolean successfulReset = new AtomicBoolean(); + newRecoveryTarget.CancellableThreads().executeIO(() -> successfulReset.set(finalOldRecoveryTarget.resetRecovery())); + if (successfulReset.get() == false) { + cancelRecovery(newRecoveryTarget.recoveryId(), "failed to reset recovery"); + return null; + } else { + logger.trace("{} restarted recovery from {}, id [{}], previous id [{}]", newRecoveryTarget.shardId(), + newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), oldRecoveryTarget.recoveryId()); + return newRecoveryTarget; + } + } catch (Exception e) { + // fail shard to be safe + oldRecoveryTarget.notifyListener(new RecoveryFailedException(oldRecoveryTarget.state(), "failed to retry recovery", e), true); + return null; } } + public RecoveryTarget getRecoveryTarget(long id) { + return onGoingRecoveries.get(id); + } + /** * gets the {@link RecoveryTarget } for a given id. The RecoveryStatus returned has it's ref count already incremented * to make sure it's safe to use. However, you must call {@link RecoveryTarget#decRef()} when you are done with it, typically @@ -116,7 +149,7 @@ public class RecoveriesCollection { if (recoveryRef == null) { throw new IndexShardClosedException(shardId); } - assert recoveryRef.status().shardId().equals(shardId); + assert recoveryRef.target().shardId().equals(shardId); return recoveryRef; } @@ -143,7 +176,8 @@ public class RecoveriesCollection { public void failRecovery(long id, RecoveryFailedException e, boolean sendShardFailure) { RecoveryTarget removed = onGoingRecoveries.remove(id); if (removed != null) { - logger.trace("{} failing recovery from {}, id [{}]. Send shard failure: [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId(), sendShardFailure); + logger.trace("{} failing recovery from {}, id [{}]. 
Send shard failure: [{}]", removed.shardId(), removed.sourceNode(), + removed.recoveryId(), sendShardFailure); removed.fail(e, sendShardFailure); } } @@ -171,11 +205,22 @@ public class RecoveriesCollection { */ public boolean cancelRecoveriesForShard(ShardId shardId, String reason) { boolean cancelled = false; - for (RecoveryTarget status : onGoingRecoveries.values()) { - if (status.shardId().equals(shardId)) { - cancelled |= cancelRecovery(status.recoveryId(), reason); + List matchedRecoveries = new ArrayList<>(); + synchronized (onGoingRecoveries) { + for (Iterator it = onGoingRecoveries.values().iterator(); it.hasNext(); ) { + RecoveryTarget status = it.next(); + if (status.shardId().equals(shardId)) { + matchedRecoveries.add(status); + it.remove(); + } } } + for (RecoveryTarget removed : matchedRecoveries) { + logger.trace("{} canceled recovery from {}, id [{}] (reason [{}])", + removed.shardId(), removed.sourceNode(), removed.recoveryId(), reason); + removed.cancel(reason); + cancelled = true; + } return cancelled; } @@ -205,7 +250,7 @@ public class RecoveriesCollection { } } - public RecoveryTarget status() { + public RecoveryTarget target() { return status; } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index d4ddccd8742..e238277b698 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -69,7 +69,7 @@ public class RecoverySettings extends AbstractComponent { */ public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", - INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING::get, TimeValue.timeValueSeconds(0), + INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING::get, TimeValue.timeValueSeconds(0), Property.Dynamic, Property.NodeScope); public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index fa1a9a7979a..be055531813 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -148,6 +148,8 @@ public class RecoverySourceHandler { // engine was just started at the end of phase 1 if (shard.state() == IndexShardState.RELOCATED) { + assert request.isPrimaryRelocation() == false : + "recovery target should not retry primary relocation if previous attempt made it past finalization step"; /** * The primary shard has been relocated while we copied files. 
This means that we can't guarantee any more that all * operations that were replicated during the file copy (when the target engine was not yet opened) will be present in the diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 67ee1a5ac9a..981a5f3ed8f 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -55,6 +55,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -87,17 +88,11 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget // last time this status was accessed private volatile long lastAccessTime = System.nanoTime(); + // latch that can be used to blockingly wait for RecoveryTarget to be closed + private final CountDownLatch closedLatch = new CountDownLatch(1); + private final Map tempFileNames = ConcurrentCollections.newConcurrentMap(); - private RecoveryTarget(RecoveryTarget copyFrom) { // copy constructor - this(copyFrom.indexShard, copyFrom.sourceNode, copyFrom.listener, copyFrom.cancellableThreads, copyFrom.recoveryId, - copyFrom.ensureClusterStateVersionCallback); - } - - public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, - Callback ensureClusterStateVersionCallback) { - this(indexShard, sourceNode, listener, new CancellableThreads(), idGenerator.incrementAndGet(), ensureClusterStateVersionCallback); - } /** * creates a new recovery target object that represents a recovery to the provided indexShard * @@ -108,11 +103,11 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget * version. Necessary for primary relocation so that new primary knows about all other ongoing * replica recoveries when replicating documents (see {@link RecoverySourceHandler}). 
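* Note that, as the constructor body below shows, each instance is created with a fresh
* {@link CancellableThreads} and a newly generated recovery id; {@link #retryCopy()} creates a
* follow-up target for the same shard, source node and listener.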
*/ - private RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, - CancellableThreads cancellableThreads, long recoveryId, Callback ensureClusterStateVersionCallback) { + public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, + Callback ensureClusterStateVersionCallback) { super("recovery_status"); - this.cancellableThreads = cancellableThreads; - this.recoveryId = recoveryId; + this.cancellableThreads = new CancellableThreads(); + this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); this.indexShard = indexShard; @@ -126,6 +121,13 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget indexShard.recoveryStats().incCurrentAsTarget(); } + /** + * returns a fresh RecoveryTarget to retry recovery from the same source node onto the same IndexShard and using the same listener + */ + public RecoveryTarget retryCopy() { + return new RecoveryTarget(this.indexShard, this.sourceNode, this.listener, this.ensureClusterStateVersionCallback); + } + public long recoveryId() { return recoveryId; } @@ -177,19 +179,28 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget } /** - * Closes the current recovery target and returns a - * clone to reset the ongoing recovery. - * Note: the returned target must be canceled, failed or finished - * in order to release all it's reference. + * Closes the current recovery target and waits up to a certain timeout for resources to be freed. + * Returns true if resetting the recovery was successful, false if the recovery target is already cancelled / failed or marked as done. */ - RecoveryTarget resetRecovery() throws IOException { - ensureRefCount(); + boolean resetRecovery() throws InterruptedException, IOException { if (finished.compareAndSet(false, true)) { + logger.debug("reset of recovery with shard {} and id [{}]", shardId, recoveryId); // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now decRef(); + closedLatch.await(); + RecoveryState.Stage stage = indexShard.recoveryState().getStage(); + if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) { + // once primary relocation has moved past the finalization step, the relocation source can be moved to RELOCATED state + // and start indexing as primary into the target shard (see TransportReplicationAction). Resetting the target shard in this + // state could mean that indexing is halted until the recovery retry attempt is completed and could also destroy existing + // documents indexed and acknowledged before the reset. 
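+ // note: an IllegalStateException thrown here surfaces in RecoveriesCollection#resetRecovery,
+ // whose catch block fails the shard to be safe (see the "failed to retry recovery" path there)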
+ assert stage != RecoveryState.Stage.DONE : "recovery should not have completed when it's being reset"; + throw new IllegalStateException("cannot reset recovery as previous attempt made it past finalization step"); + } + indexShard.performRecoveryRestart(); + return true; } - indexShard.performRecoveryRestart(); - return new RecoveryTarget(this); + return false; } /** @@ -220,7 +231,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget public void fail(RecoveryFailedException e, boolean sendShardFailure) { if (finished.compareAndSet(false, true)) { try { - listener.onRecoveryFailure(state(), e, sendShardFailure); + notifyListener(e, sendShardFailure); } finally { try { cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]"); @@ -232,6 +243,10 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget } } + public void notifyListener(RecoveryFailedException e, boolean sendShardFailure) { + listener.onRecoveryFailure(state(), e, sendShardFailure); + } + /** mark the current recovery as done */ public void markAsDone() { if (finished.compareAndSet(false, true)) { @@ -309,6 +324,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget // free store. increment happens in constructor store.decRef(); indexShard.recoveryStats().decCurrentAsTarget(); + closedLatch.countDown(); } } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4e3c516aa72..6bbccb4cfb7 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -31,8 +31,8 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.store.Store; @@ -55,12 +56,12 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSDirectoryService; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import 
org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -73,6 +74,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -653,4 +656,128 @@ public class IndexRecoveryIT extends ESIntegTestCase { super.sendRequest(connection, requestId, action, request, options); } } + + /** + * Tests scenario where recovery target successfully sends recovery request to source but then the channel gets closed while + * the source is working on the recovery process. + */ + @TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE") + public void testDisconnectsDuringRecovery() throws Exception { + boolean primaryRelocation = randomBoolean(); + final String indexName = "test"; + final Settings nodeSettings = Settings.builder() + .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), TimeValue.timeValueMillis(randomIntBetween(0, 100))) + .build(); + TimeValue disconnectAfterDelay = TimeValue.timeValueMillis(randomIntBetween(0, 100)); + // start a master node + String masterNodeName = internalCluster().startMasterOnlyNode(nodeSettings); + + final String blueNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build()); + final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); + + client().admin().indices().prepareCreate(indexName) + .setSettings( + Settings.builder() + .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + ).get(); + + List requests = new ArrayList<>(); + int numDocs = scaledRandomIntBetween(25, 250); + for (int i = 0; i < numDocs; i++) { + requests.add(client().prepareIndex(indexName, "type").setSource("{}")); + } + indexRandom(true, requests); + ensureSearchable(indexName); + assertHitCount(client().prepareSearch(indexName).get(), numDocs); + + MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, masterNodeName); + MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName); + MockTransportService redMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, redNodeName); + + redMockTransportService.addDelegate(blueMockTransportService, new MockTransportService.DelegateTransport(redMockTransportService.original()) { + private final AtomicInteger count = new AtomicInteger(); + + @Override + protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException { + logger.info("--> sending request {} on {}", action, connection.getNode()); + if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action) && count.incrementAndGet() == 1) { + // ensures that it's considered as valid recovery attempt by source + try { + awaitBusy(() -> client(blueNodeName).admin().cluster().prepareState().setLocal(true).get() + 
.getState().getRoutingTable().index("test").shard(0).getAllInitializingShards().isEmpty() == false); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + super.sendRequest(connection, requestId, action, request, options); + try { + Thread.sleep(disconnectAfterDelay.millis()); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulation disconnect after successfully sending " + action + " request"); + } else { + super.sendRequest(connection, requestId, action, request, options); + } + } + }); + + final AtomicBoolean seenWaitForClusterState = new AtomicBoolean(); + blueMockTransportService.addDelegate(redMockTransportService, new MockTransportService.DelegateTransport(blueMockTransportService.original()) { + @Override + protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException { + logger.info("--> sending request {} on {}", action, connection.getNode()); + if (action.equals(PeerRecoveryTargetService.Actions.WAIT_CLUSTERSTATE)) { + seenWaitForClusterState.set(true); + } + super.sendRequest(connection, requestId, action, request, options); + } + }); + + for (MockTransportService mockTransportService : Arrays.asList(redMockTransportService, blueMockTransportService)) { + mockTransportService.addDelegate(masterTransportService, new MockTransportService.DelegateTransport(mockTransportService.original()) { + @Override + protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException { + logger.info("--> sending request {} on {}", action, connection.getNode()); + if (primaryRelocation == false || seenWaitForClusterState.get() == false) { + assertNotEquals(action, ShardStateAction.SHARD_FAILED_ACTION_NAME); + } + super.sendRequest(connection, requestId, action, request, options); + } + }); + } + + if (primaryRelocation) { + logger.info("--> starting primary relocation recovery from blue to red"); + client().admin().indices().prepareUpdateSettings(indexName).setSettings( + Settings.builder() + .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "red") + ).get(); + + ensureGreen(); // also waits for relocation / recovery to complete + // if a primary relocation fails after the source shard has been marked as relocated, both source and target are failed. If the + // source shard is moved back to started because the target fails first, it's possible that there is a cluster state where the + // shard is marked as started again (and ensureGreen returns), but while applying the cluster state the primary is failed and + // will be reallocated. The cluster will thus become green, then red, then green again. Triggering a refresh here before + // searching helps, as in contrast to search actions, refresh waits for the closed shard to be reallocated. 
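+ // without this refresh, the assertHitCount calls at the end of the test could still race
+ // against that reallocation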
+ client().admin().indices().prepareRefresh(indexName).get(); + } else { + logger.info("--> starting replica recovery from blue to red"); + client().admin().indices().prepareUpdateSettings(indexName).setSettings( + Settings.builder() + .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "red,blue") + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + ).get(); + + ensureGreen(); + } + + for (int i = 0; i < 10; i++) { + assertHitCount(client().prepareSearch(indexName).get(), numDocs); + } + } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index 5b1073beffa..bbc434120d8 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveriesCollection; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryTarget; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -54,10 +56,10 @@ public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase { final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool, v -> {}); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); try (RecoveriesCollection.RecoveryRef status = collection.getRecovery(recoveryId)) { - final long lastSeenTime = status.status().lastAccessTime(); + final long lastSeenTime = status.target().lastAccessTime(); assertBusy(() -> { try (RecoveriesCollection.RecoveryRef currentStatus = collection.getRecovery(recoveryId)) { - assertThat("access time failed to update", lastSeenTime, lessThan(currentStatus.status().lastAccessTime())); + assertThat("access time failed to update", lastSeenTime, lessThan(currentStatus.target().lastAccessTime())); } }); } finally { @@ -100,7 +102,7 @@ public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase { final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); try (RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId)) { - ShardId shardId = recoveryRef.status().shardId(); + ShardId shardId = recoveryRef.target().shardId(); assertTrue("failed to cancel recoveries", collection.cancelRecoveriesForShard(shardId, "test")); assertThat("all recoveries should be cancelled", collection.size(), equalTo(0)); } finally { @@ -118,30 +120,30 @@ public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase { final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool, v -> {}); IndexShard shard = shards.addReplica(); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard); - try (RecoveriesCollection.RecoveryRef recovery = collection.getRecovery(recoveryId)) { - final int currentAsTarget = 
shard.recoveryStats().currentAsTarget(); - final int referencesToStore = recovery.status().store().refCount(); - String tempFileName = recovery.status().getTempNameForFile("foobar"); - collection.resetRecovery(recoveryId, recovery.status().shardId()); - try (RecoveriesCollection.RecoveryRef resetRecovery = collection.getRecovery(recoveryId)) { - assertNotSame(recovery.status(), resetRecovery); - assertSame(recovery.status().CancellableThreads(), resetRecovery.status().CancellableThreads()); - assertSame(recovery.status().indexShard(), resetRecovery.status().indexShard()); - assertSame(recovery.status().store(), resetRecovery.status().store()); - assertEquals(referencesToStore + 1, resetRecovery.status().store().refCount()); - assertEquals(currentAsTarget+1, shard.recoveryStats().currentAsTarget()); // we blink for a short moment... - recovery.close(); - expectThrows(ElasticsearchException.class, () -> recovery.status().store()); - assertEquals(referencesToStore, resetRecovery.status().store().refCount()); - String resetTempFileName = resetRecovery.status().getTempNameForFile("foobar"); - assertNotEquals(tempFileName, resetTempFileName); - } - assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget()); - } - try (RecoveriesCollection.RecoveryRef resetRecovery = collection.getRecovery(recoveryId)) { + RecoveryTarget recoveryTarget = collection.getRecoveryTarget(recoveryId); + final int currentAsTarget = shard.recoveryStats().currentAsTarget(); + final int referencesToStore = recoveryTarget.store().refCount(); + IndexShard indexShard = recoveryTarget.indexShard(); + Store store = recoveryTarget.store(); + String tempFileName = recoveryTarget.getTempNameForFile("foobar"); + RecoveryTarget resetRecovery = collection.resetRecovery(recoveryId, TimeValue.timeValueMinutes(60)); + final long resetRecoveryId = resetRecovery.recoveryId(); + assertNotSame(recoveryTarget, resetRecovery); + assertNotSame(recoveryTarget.CancellableThreads(), resetRecovery.CancellableThreads()); + assertSame(indexShard, resetRecovery.indexShard()); + assertSame(store, resetRecovery.store()); + assertEquals(referencesToStore, resetRecovery.store().refCount()); + assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget()); + assertEquals(recoveryTarget.refCount(), 0); + expectThrows(ElasticsearchException.class, () -> recoveryTarget.store()); + expectThrows(ElasticsearchException.class, () -> recoveryTarget.indexShard()); + String resetTempFileName = resetRecovery.getTempNameForFile("foobar"); + assertNotEquals(tempFileName, resetTempFileName); + assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget()); + try (RecoveriesCollection.RecoveryRef newRecoveryRef = collection.getRecovery(resetRecoveryId)) { shards.recoverReplica(shard, (s, n) -> { - assertSame(s, resetRecovery.status().indexShard()); - return resetRecovery.status(); + assertSame(s, newRecoveryRef.target().indexShard()); + return newRecoveryRef.target(); }, false); } shards.assertAllEqual(numDocs); From 3999e5ba6bb2e4a523132490ac1ecd6931f109bd Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 29 Dec 2016 11:05:28 +0100 Subject: [PATCH 035/119] Docs: Added link from bool and constant score query to filter context Closes #22353 --- docs/reference/query-dsl/bool-query.asciidoc | 4 +++- docs/reference/query-dsl/constant-score-query.asciidoc | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc index 
2a289910c43..54fedbee3f7 100644
--- a/docs/reference/query-dsl/bool-query.asciidoc
+++ b/docs/reference/query-dsl/bool-query.asciidoc
@@ -13,7 +13,9 @@ occurrence types are:
contribute to the score.
|`filter` |The clause (query) must appear in matching documents. However unlike
-`must` the score of the query will be ignored.
+`must` the score of the query will be ignored. Filter clauses are executed
+in <>, meaning that scoring is ignored
+and clauses are considered for caching.
|`should` |The clause (query) should appear in the matching document. In a
boolean query with no `must` or `filter` clauses, one or more `should` clauses
diff --git a/docs/reference/query-dsl/constant-score-query.asciidoc b/docs/reference/query-dsl/constant-score-query.asciidoc
index bced9fc9fbe..aa7ee60aa5c 100644
--- a/docs/reference/query-dsl/constant-score-query.asciidoc
+++ b/docs/reference/query-dsl/constant-score-query.asciidoc
@@ -20,3 +20,6 @@ GET /_search
}
--------------------------------------------------
// CONSOLE
+
+Filter clauses are executed in <>,
+meaning that scoring is ignored and clauses are considered for caching.

From 816e1c6cc4db70727e4f006646330dea7abb92cb Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Thu, 29 Dec 2016 16:51:37 +0100
Subject: [PATCH 036/119] Free shard resources when recovery reset is cancelled

Resetting a recovery consists of resetting the old recovery target and replacing it with a new
recovery target object. This is done on the `CancellableThreads` of the new recovery target. If
the new recovery target is already cancelled before or while this happens, for example due to
shard closing or recovery source changing, we have to make sure that the old recovery target
object frees all shard resources.

Relates to #22325
---
.../indices/recovery/RecoveriesCollection.java | 14 ++++++++++++--
.../indices/recovery/RecoveryTarget.java | 11 ++++++++---
2 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java
index 2ed79a8a996..3bee3febf3f 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java
@@ -26,6 +26,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
+import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.shard.IndexShard;
@@ -106,9 +107,18 @@ public class RecoveriesCollection {
}
// Closes the current recovery target
- final RecoveryTarget finalOldRecoveryTarget = oldRecoveryTarget;
final AtomicBoolean successfulReset = new AtomicBoolean();
- newRecoveryTarget.CancellableThreads().executeIO(() -> successfulReset.set(finalOldRecoveryTarget.resetRecovery()));
+ try {
+ final RecoveryTarget finalOldRecoveryTarget = oldRecoveryTarget;
+ newRecoveryTarget.CancellableThreads().executeIO(() -> successfulReset.set(finalOldRecoveryTarget.resetRecovery()));
+ } catch (CancellableThreads.ExecutionCancelledException e) {
+ // new recovery target is already cancelled (probably due to shard closing or recovery source changing)
+ assert
onGoingRecoveries.containsKey(newRecoveryTarget.recoveryId()) == false; + logger.trace("{} recovery reset cancelled, recovery from {}, id [{}], previous id [{}]", newRecoveryTarget.shardId(), + newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), oldRecoveryTarget.recoveryId()); + oldRecoveryTarget.cancel("recovery reset cancelled"); // if finalOldRecoveryTarget.resetRecovery did not even get to execute + return null; + } if (successfulReset.get() == false) { cancelRecovery(newRecoveryTarget.recoveryId(), "failed to reset recovery"); return null; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 981a5f3ed8f..4311d3b2ab1 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -184,9 +184,14 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget */ boolean resetRecovery() throws InterruptedException, IOException { if (finished.compareAndSet(false, true)) { - logger.debug("reset of recovery with shard {} and id [{}]", shardId, recoveryId); - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); + try { + // yes, this is just a logger call in a try-finally block. The reason for this is that resetRecovery is called from + // CancellableThreads and we have to make sure that all references to IndexShard are cleaned up before exiting this method + logger.debug("reset of recovery with shard {} and id [{}]", shardId, recoveryId); + } finally { + // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now. + decRef(); + } closedLatch.await(); RecoveryState.Stage stage = indexShard.recoveryState().getStage(); if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) { From 117b63ed41c9cfe68caf45302bc1ff522a904d54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Bobruk?= Date: Thu, 29 Dec 2016 18:21:32 +0100 Subject: [PATCH 037/119] Docs fix native script usage in 5.x (#22362) --- docs/reference/modules/scripting/native.asciidoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules/scripting/native.asciidoc b/docs/reference/modules/scripting/native.asciidoc index 8fb49b03d96..ba8b154846f 100644 --- a/docs/reference/modules/scripting/native.asciidoc +++ b/docs/reference/modules/scripting/native.asciidoc @@ -36,6 +36,10 @@ public class MyNativeScriptPlugin extends Plugin implements ScriptPlugin { public boolean needsScores() { return false; } + @Override + public String getName() { + return "my_script"; + } } public static class MyNativeScript extends AbstractDoubleSearchScript { @@ -66,7 +70,7 @@ curl -XPOST localhost:9200/_search -d '{ { "script_score": { "script": { - "id": "my_script", + "inline": "my_script", "lang" : "native" } } From 3f805d68cb9047e8cd77654561dd0f5e5bba5379 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 30 Dec 2016 09:36:10 +0100 Subject: [PATCH 038/119] Add the ability to set an analyzer on keyword fields. (#21919) This adds a new `normalizer` property to `keyword` fields that pre-processes the field value prior to indexing, but without altering the `_source`. 
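As a minimal sketch of the resulting API (the index, type, field and normalizer names below are
invented for illustration; they are not taken from this change):

    PUT my_index
    {
      "settings": {
        "analysis": {
          "normalizer": {
            "my_normalizer": {
              "type": "custom",
              "filter": ["lowercase", "asciifolding"]
            }
          }
        }
      },
      "mappings": {
        "my_type": {
          "properties": {
            "my_field": {
              "type": "keyword",
              "normalizer": "my_normalizer"
            }
          }
        }
      }
    }

The stored `_source` still contains the original value, while the indexed terms are the
normalized ones.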
Note that only the normalization components that work on a per-character basis are applied, so for instance stemming filters will be ignored while lowercasing or ascii folding will be applied. Closes #18064 --- .../metadata/MetaDataIndexUpgradeService.java | 5 +- .../index/analysis/AnalysisRegistry.java | 131 +++++++++++--- .../index/analysis/CustomAnalyzer.java | 23 +++ .../analysis/CustomNormalizerProvider.java | 95 ++++++++++ .../index/analysis/IndexAnalyzers.java | 14 +- .../index/mapper/KeywordFieldMapper.java | 114 +++++++++++- .../indices/analysis/AnalysisModule.java | 10 +- .../gateway/GatewayIndexStateIT.java | 2 +- .../elasticsearch/index/IndexModuleTests.java | 26 +-- .../index/analysis/AnalysisRegistryTests.java | 26 +-- .../index/analysis/CustomNormalizerTests.java | 102 +++++++++++ .../index/engine/InternalEngineTests.java | 2 +- .../index/mapper/KeywordFieldMapperTests.java | 64 ++++++- .../index/mapper/KeywordFieldTypeTests.java | 48 +++++- .../index/mapper/ParentFieldMapperTests.java | 2 +- docs/reference/analysis.asciidoc | 2 + docs/reference/analysis/normalizers.asciidoc | 57 ++++++ docs/reference/mapping/params.asciidoc | 3 + .../mapping/params/normalizer.asciidoc | 163 ++++++++++++++++++ docs/reference/mapping/types/keyword.asciidoc | 6 + 20 files changed, 827 insertions(+), 68 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java create mode 100644 core/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java create mode 100644 docs/reference/analysis/normalizers.asciidoc create mode 100644 docs/reference/mapping/params/normalizer.asciidoc diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 1779699d448..614d12547fc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -145,11 +145,10 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { @Override public Set> entrySet() { - // just to ensure we can iterate over this single analzyer - return Collections.singletonMap(fakeDefault.name(), fakeDefault).entrySet(); + return Collections.emptySet(); } }; - try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap)) { + try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 87f9692f625..25ef5d1885f 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -67,17 +67,20 @@ public final class AnalysisRegistry implements Closeable { private final Map> tokenFilters; private final Map> tokenizers; private final Map>> analyzers; + private final Map>> normalizers; public AnalysisRegistry(Environment environment, Map> charFilters, Map> tokenFilters, 
Map> tokenizers,
- Map>> analyzers) {
+ Map>> analyzers,
+ Map>> normalizers) {
this.environment = environment;
this.charFilters = unmodifiableMap(charFilters);
this.tokenFilters = unmodifiableMap(tokenFilters);
this.tokenizers = unmodifiableMap(tokenizers);
this.analyzers = unmodifiableMap(analyzers);
+ this.normalizers = unmodifiableMap(normalizers);
}
/**
@@ -151,7 +154,8 @@ public final class AnalysisRegistry implements Closeable {
final Map tokenizerFactories = buildTokenizerFactories(indexSettings);
final Map tokenFilterFactories = buildTokenFilterFactories(indexSettings);
final Map> analyzierFactories = buildAnalyzerFactories(indexSettings);
- return build(indexSettings, analyzierFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
+ final Map> normalizerFactories = buildNormalizerFactories(indexSettings);
+ return build(indexSettings, analyzierFactories, normalizerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
}
public Map buildTokenFilterFactories(IndexSettings indexSettings) throws IOException {
@@ -164,22 +168,28 @@
*/
tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
tokenFilters.put("synonym_graph", requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphFilterFactory(is, env, this, name, settings)));
- return buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
+ return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
}
public Map buildTokenizerFactories(IndexSettings indexSettings) throws IOException {
final Map tokenizersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_TOKENIZER);
- return buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
+ return buildMapping(Component.TOKENIZER, indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
}
public Map buildCharFilterFactories(IndexSettings indexSettings) throws IOException {
final Map charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER);
- return buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
+ return buildMapping(Component.CHAR_FILTER, indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
}
public Map> buildAnalyzerFactories(IndexSettings indexSettings) throws IOException {
final Map analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
- return buildMapping(true, "analyzer", indexSettings, analyzersSettings, analyzers, prebuiltAnalysis.analyzerProviderFactories);
+ return buildMapping(Component.ANALYZER, indexSettings, analyzersSettings, analyzers, prebuiltAnalysis.analyzerProviderFactories);
+ }
+
+ public Map> buildNormalizerFactories(IndexSettings indexSettings) throws IOException {
+ final Map normalizersSettings = indexSettings.getSettings().getGroups("index.analysis.normalizer");
+ // TODO: Have pre-built normalizers
+ return buildMapping(Component.NORMALIZER, indexSettings, normalizersSettings, normalizers, Collections.emptyMap());
}
/**
@@ -194,7 +204,7 @@
final
Map tokenizerSettings = indexSettings.getSettings().getGroups("index.analysis.tokenizer"); if (tokenizerSettings.containsKey(tokenizer)) { Settings currentSettings = tokenizerSettings.get(tokenizer); - return getAnalysisProvider("tokenizer", tokenizers, tokenizer, currentSettings.get("type")); + return getAnalysisProvider(Component.TOKENIZER, tokenizers, tokenizer, currentSettings.get("type")); } else { return getTokenizerProvider(tokenizer); } @@ -223,7 +233,7 @@ public final class AnalysisRegistry implements Closeable { } else if ("synonym_graph".equals(typeName)) { return requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphFilterFactory(is, env, this, name, settings)); } else { - return getAnalysisProvider("tokenfilter", tokenFilters, tokenFilter, typeName); + return getAnalysisProvider(Component.FILTER, tokenFilters, tokenFilter, typeName); } } else { return getTokenFilterProvider(tokenFilter); @@ -242,7 +252,7 @@ public final class AnalysisRegistry implements Closeable { final Map tokenFilterSettings = indexSettings.getSettings().getGroups("index.analysis.char_filter"); if (tokenFilterSettings.containsKey(charFilter)) { Settings currentSettings = tokenFilterSettings.get(charFilter); - return getAnalysisProvider("charfilter", charFilters, charFilter, currentSettings.get("type")); + return getAnalysisProvider(Component.CHAR_FILTER, charFilters, charFilter, currentSettings.get("type")); } else { return getCharFilterProvider(charFilter); } @@ -261,7 +271,40 @@ public final class AnalysisRegistry implements Closeable { }; } - private Map buildMapping(boolean analyzer, String toBuild, IndexSettings settings, Map settingsMap, + enum Component { + ANALYZER { + @Override + public String toString() { + return "analyzer"; + } + }, + NORMALIZER { + @Override + public String toString() { + return "normalizer"; + } + }, + CHAR_FILTER { + @Override + public String toString() { + return "char_filter"; + } + }, + TOKENIZER { + @Override + public String toString() { + return "tokenizer"; + } + }, + FILTER { + @Override + public String toString() { + return "filter"; + } + }; + } + + private Map buildMapping(Component component, IndexSettings settings, Map settingsMap, Map> providerMap, Map> defaultInstance) throws IOException { Settings defaultSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, settings.getIndexVersionCreated()).build(); @@ -270,29 +313,34 @@ public final class AnalysisRegistry implements Closeable { String name = entry.getKey(); Settings currentSettings = entry.getValue(); String typeName = currentSettings.get("type"); - if (analyzer) { - T factory; + if (component == Component.ANALYZER) { + T factory = null; if (typeName == null) { if (currentSettings.get("tokenizer") != null) { factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings); } else { - throw new IllegalArgumentException(toBuild + " [" + name + "] must specify either an analyzer type, or a tokenizer"); + throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer"); } } else if (typeName.equals("custom")) { factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings); - } else { - AnalysisModule.AnalysisProvider type = providerMap.get(typeName); - if (type == null) { - throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]"); - } - factory = type.get(settings, environment, name, currentSettings); } - factories.put(name, factory); - } else { - 
AnalysisProvider type = getAnalysisProvider(toBuild, providerMap, name, typeName); - final T factory = type.get(settings, environment, name, currentSettings); - factories.put(name, factory); + if (factory != null) { + factories.put(name, factory); + continue; + } + } else if (component == Component.NORMALIZER) { + if (typeName == null || typeName.equals("custom")) { + T factory = (T) new CustomNormalizerProvider(settings, name, currentSettings); + factories.put(name, factory); + continue; + } } + AnalysisProvider type = getAnalysisProvider(component, providerMap, name, typeName); + if (type == null) { + throw new IllegalArgumentException("Unknown " + component + " type [" + typeName + "] for [" + name + "]"); + } + final T factory = type.get(settings, environment, name, currentSettings); + factories.put(name, factory); } // go over the char filters in the bindings and register the ones that are not configured @@ -330,13 +378,13 @@ public final class AnalysisRegistry implements Closeable { return factories; } - private AnalysisProvider getAnalysisProvider(String toBuild, Map> providerMap, String name, String typeName) { + private AnalysisProvider getAnalysisProvider(Component component, Map> providerMap, String name, String typeName) { if (typeName == null) { - throw new IllegalArgumentException(toBuild + " [" + name + "] must specify either an analyzer type, or a tokenizer"); + throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer"); } AnalysisProvider type = providerMap.get(typeName); if (type == null) { - throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]"); + throw new IllegalArgumentException("Unknown " + component + " type [" + typeName + "] for [" + name + "]"); } return type; } @@ -426,6 +474,7 @@ public final class AnalysisRegistry implements Closeable { public IndexAnalyzers build(IndexSettings indexSettings, Map> analyzerProviders, + Map> normalizerProviders, Map tokenizerFactoryFactories, Map charFilterFactoryFactories, Map tokenFilterFactoryFactories) { @@ -436,10 +485,15 @@ public final class AnalysisRegistry implements Closeable { DeprecationLogger deprecationLogger = new DeprecationLogger(logger); Map analyzerAliases = new HashMap<>(); Map analyzers = new HashMap<>(); + Map normalizers = new HashMap<>(); for (Map.Entry> entry : analyzerProviders.entrySet()) { processAnalyzerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); } + for (Map.Entry> entry : normalizerProviders.entrySet()) { + processNormalizerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), normalizers, + tokenFilterFactoryFactories, charFilterFactoryFactories); + } for (Map.Entry entry : analyzerAliases.entrySet()) { String key = entry.getKey(); if (analyzers.containsKey(key) && @@ -485,7 +539,7 @@ public final class AnalysisRegistry implements Closeable { } } return new IndexAnalyzers(indexSettings, defaultIndexAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer, - unmodifiableMap(analyzers)); + unmodifiableMap(analyzers), unmodifiableMap(normalizers)); } private void processAnalyzerFactory(DeprecationLogger deprecationLogger, @@ -551,4 +605,25 @@ public final class AnalysisRegistry implements Closeable { } } } + + private void processNormalizerFactory(DeprecationLogger deprecationLogger, + IndexSettings indexSettings, + String 
name,
+ AnalyzerProvider normalizerFactory,
+ Map normalizers,
+ Map tokenFilters,
+ Map charFilters) {
+ if (normalizerFactory instanceof CustomNormalizerProvider) {
+ ((CustomNormalizerProvider) normalizerFactory).build(charFilters, tokenFilters);
+ }
+ Analyzer normalizerF = normalizerFactory.get();
+ if (normalizerF == null) {
+ throw new IllegalArgumentException("normalizer [" + normalizerFactory.name() + "] created null normalizer");
+ }
+ NamedAnalyzer normalizer = new NamedAnalyzer(name, normalizerFactory.scope(), normalizerF);
+ if (normalizers.containsKey(name)) {
+ throw new IllegalStateException("already registered normalizer with name: " + name);
+ }
+ normalizers.put(name, normalizer);
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java
index 6185f358568..68799413907 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java
@@ -94,4 +94,27 @@ public final class CustomAnalyzer extends Analyzer {
}
return reader;
}
+
+ @Override
+ protected Reader initReaderForNormalization(String fieldName, Reader reader) {
+ for (CharFilterFactory charFilter : charFilters) {
+ if (charFilter instanceof MultiTermAwareComponent) {
+ charFilter = (CharFilterFactory) ((MultiTermAwareComponent) charFilter).getMultiTermComponent();
+ reader = charFilter.create(reader);
+ }
+ }
+ return reader;
+ }
+
+ @Override
+ protected TokenStream normalize(String fieldName, TokenStream in) {
+ TokenStream result = in;
+ for (TokenFilterFactory filter : tokenFilters) {
+ if (filter instanceof MultiTermAwareComponent) {
+ filter = (TokenFilterFactory) ((MultiTermAwareComponent) filter).getMultiTermComponent();
+ result = filter.create(result);
+ }
+ }
+ return result;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java
new file mode 100644
index 00000000000..4f50a34dd9e
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A custom normalizer that is built out of char and token filters. In
+ * contrast to analyzers, it does not support tokenizers and only supports a
+ * subset of char and token filters.
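+ *
+ * As a configuration sketch (the normalizer name "my_normalizer" is invented for
+ * illustration), such a normalizer is declared under the "index.analysis.normalizer"
+ * settings group, for example with "type": "custom" and "filter": ["lowercase", "asciifolding"];
+ * only char/token filters that implement MultiTermAwareComponent are accepted.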
+ */ +public final class CustomNormalizerProvider extends AbstractIndexAnalyzerProvider { + + private final Settings analyzerSettings; + + private CustomAnalyzer customAnalyzer; + + public CustomNormalizerProvider(IndexSettings indexSettings, + String name, Settings settings) { + super(indexSettings, name, settings); + this.analyzerSettings = settings; + } + + public void build(final Map charFilters, final Map tokenFilters) { + String tokenizerName = analyzerSettings.get("tokenizer"); + if (tokenizerName != null) { + throw new IllegalArgumentException("Custom normalizer [" + name() + "] cannot configure a tokenizer"); + } + + List charFiltersList = new ArrayList<>(); + String[] charFilterNames = analyzerSettings.getAsArray("char_filter"); + for (String charFilterName : charFilterNames) { + CharFilterFactory charFilter = charFilters.get(charFilterName); + if (charFilter == null) { + throw new IllegalArgumentException("Custom normalizer [" + name() + "] failed to find char_filter under name [" + + charFilterName + "]"); + } + if (charFilter instanceof MultiTermAwareComponent == false) { + throw new IllegalArgumentException("Custom normalizer [" + name() + "] may not use char filter [" + + charFilterName + "]"); + } + charFilter = (CharFilterFactory) ((MultiTermAwareComponent) charFilter).getMultiTermComponent(); + charFiltersList.add(charFilter); + } + + List tokenFilterList = new ArrayList<>(); + String[] tokenFilterNames = analyzerSettings.getAsArray("filter"); + for (String tokenFilterName : tokenFilterNames) { + TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName); + if (tokenFilter == null) { + throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name [" + + tokenFilterName + "]"); + } + if (tokenFilter instanceof MultiTermAwareComponent == false) { + throw new IllegalArgumentException("Custom normalizer [" + name() + "] may not use filter [" + tokenFilterName + "]"); + } + tokenFilter = (TokenFilterFactory) ((MultiTermAwareComponent) tokenFilter).getMultiTermComponent(); + tokenFilterList.add(tokenFilter); + } + + this.customAnalyzer = new CustomAnalyzer( + PreBuiltTokenizers.KEYWORD.getTokenizerFactory(indexSettings.getIndexVersionCreated()), + charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]), + tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]) + ); + } + + @Override + public CustomAnalyzer get() { + return this.customAnalyzer; + } +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java b/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java index 127714178b5..f3200d606fb 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java @@ -25,6 +25,7 @@ import org.elasticsearch.index.IndexSettings; import java.io.Closeable; import java.io.IOException; import java.util.Map; +import java.util.stream.Stream; /** * IndexAnalyzers contains a name to analyzer mapping for a specific index. 
@@ -38,15 +39,18 @@ public final class IndexAnalyzers extends AbstractIndexComponent implements Clos private final NamedAnalyzer defaultSearchAnalyzer; private final NamedAnalyzer defaultSearchQuoteAnalyzer; private final Map analyzers; + private final Map normalizers; private final IndexSettings indexSettings; public IndexAnalyzers(IndexSettings indexSettings, NamedAnalyzer defaultIndexAnalyzer, NamedAnalyzer defaultSearchAnalyzer, - NamedAnalyzer defaultSearchQuoteAnalyzer, Map analyzers) { + NamedAnalyzer defaultSearchQuoteAnalyzer, Map analyzers, + Map normalizers) { super(indexSettings); this.defaultIndexAnalyzer = defaultIndexAnalyzer; this.defaultSearchAnalyzer = defaultSearchAnalyzer; this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer; this.analyzers = analyzers; + this.normalizers = normalizers; this.indexSettings = indexSettings; } @@ -57,6 +61,12 @@ public final class IndexAnalyzers extends AbstractIndexComponent implements Clos return analyzers.get(name); } + /** + * Returns a normalizer mapped to the given name or null if not present + */ + public NamedAnalyzer getNormalizer(String name) { + return normalizers.get(name); + } /** * Returns the default index analyzer for this index @@ -81,7 +91,7 @@ public final class IndexAnalyzers extends AbstractIndexComponent implements Clos @Override public void close() throws IOException { - IOUtils.close(() -> analyzers.values().stream() + IOUtils.close(() -> Stream.concat(analyzers.values().stream(), normalizers.values().stream()) .filter(a -> a.scope() == AnalyzerScope.INDEX) .iterator()); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 68807215027..f4f6266262f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -19,16 +19,20 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; @@ -36,6 +40,7 @@ import java.io.IOException; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.index.mapper.TypeParsers.parseField; @@ -70,6 +75,11 @@ public final class KeywordFieldMapper extends FieldMapper { builder = this; } + @Override + public KeywordFieldType fieldType() { + return (KeywordFieldType) super.fieldType(); + } + public Builder ignoreAbove(int ignoreAbove) { if (ignoreAbove < 0) { throw new IllegalArgumentException("[ignore_above] must be positive, got " + ignoreAbove); @@ -92,6 +102,12 @@ public final class KeywordFieldMapper extends FieldMapper { return builder; } + public Builder 
normalizer(NamedAnalyzer normalizer) { + fieldType().setNormalizer(normalizer); + fieldType().setSearchAnalyzer(normalizer); + return builder; + } + @Override public KeywordFieldMapper build(BuilderContext context) { setupFieldType(context); @@ -103,7 +119,7 @@ public final class KeywordFieldMapper extends FieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override - public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name); parseField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { @@ -125,6 +141,15 @@ public final class KeywordFieldMapper extends FieldMapper { } else if (propName.equals("eager_global_ordinals")) { builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode)); iterator.remove(); + } else if (propName.equals("normalizer")) { + if (propNode != null) { + NamedAnalyzer normalizer = parserContext.getIndexAnalyzers().getNormalizer(propNode.toString()); + if (normalizer == null) { + throw new MapperParsingException("normalizer [" + propNode.toString() + "] not found for field [" + name + "]"); + } + builder.normalizer(normalizer); + } + iterator.remove(); } } return builder; @@ -133,21 +158,58 @@ public final class KeywordFieldMapper extends FieldMapper { public static final class KeywordFieldType extends StringFieldType { - public KeywordFieldType() {} + private NamedAnalyzer normalizer = null; + + public KeywordFieldType() { + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + } protected KeywordFieldType(KeywordFieldType ref) { super(ref); + this.normalizer = ref.normalizer; } public KeywordFieldType clone() { return new KeywordFieldType(this); } + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + return Objects.equals(normalizer, ((KeywordFieldType) o).normalizer); + } + + @Override + public void checkCompatibility(MappedFieldType otherFT, List conflicts, boolean strict) { + super.checkCompatibility(otherFT, conflicts, strict); + KeywordFieldType other = (KeywordFieldType) otherFT; + if (Objects.equals(normalizer, other.normalizer) == false) { + conflicts.add("mapper [" + name() + "] has different [normalizer]"); + } + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hashCode(normalizer); + } + @Override public String typeName() { return CONTENT_TYPE; } + public NamedAnalyzer normalizer() { + return normalizer; + } + + public void setNormalizer(NamedAnalyzer normalizer) { + checkIfFrozen(); + this.normalizer = normalizer; + } + @Override public Query nullValueQuery() { if (nullValue() == null) { @@ -171,13 +233,25 @@ public final class KeywordFieldMapper extends FieldMapper { BytesRef binaryValue = (BytesRef) value; return binaryValue.utf8ToString(); } + + @Override + protected BytesRef indexedValueForSearch(Object value) { + if (value == null) { + return null; + } + if (value instanceof BytesRef) { + value = ((BytesRef) value).utf8ToString(); + } + return searchAnalyzer().normalize(name(), value.toString()); + } } private Boolean includeInAll; private int ignoreAbove; protected KeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, - int ignoreAbove, Boolean 
includeInAll, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + int ignoreAbove, Boolean includeInAll, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; this.ignoreAbove = ignoreAbove; @@ -196,6 +270,11 @@ public final class KeywordFieldMapper extends FieldMapper { return (KeywordFieldMapper) super.clone(); } + @Override + public KeywordFieldType fieldType() { + return (KeywordFieldType) super.fieldType(); + } + // pkg-private for testing Boolean includeInAll() { return includeInAll; @@ -203,7 +282,7 @@ public final class KeywordFieldMapper extends FieldMapper { @Override protected void parseCreateField(ParseContext context, List fields) throws IOException { - final String value; + String value; if (context.externalValueSet()) { value = context.externalValue().toString(); } else { @@ -219,6 +298,27 @@ public final class KeywordFieldMapper extends FieldMapper { return; } + final NamedAnalyzer normalizer = fieldType().normalizer(); + if (normalizer != null) { + try (final TokenStream ts = normalizer.tokenStream(name(), value)) { + final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); + ts.reset(); + if (ts.incrementToken() == false) { + throw new IllegalStateException("The normalization token stream is " + + "expected to produce exactly 1 token, but got 0 for analyzer " + + normalizer + " and input \"" + value + "\""); + } + final String newValue = termAtt.toString(); + if (ts.incrementToken()) { + throw new IllegalStateException("The normalization token stream is " + + "expected to produce exactly 1 token, but got 2+ for analyzer " + + normalizer + " and input \"" + value + "\""); + } + ts.end(); + value = newValue; + } + } + if (context.includeInAll(includeInAll, this)) { context.allEntries().addText(fieldType().name(), value, fieldType().boost()); } @@ -263,5 +363,11 @@ public final class KeywordFieldMapper extends FieldMapper { if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) { builder.field("ignore_above", ignoreAbove); } + + if (fieldType().normalizer() != null) { + builder.field("normalizer", fieldType().normalizer().name()); + } else if (includeDefaults) { + builder.nullField("normalizer"); + } } } diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 89c9421198d..fae4c75b655 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -170,8 +170,9 @@ public final class AnalysisModule { NamedRegistry> tokenFilters = setupTokenFilters(plugins, hunspellService); NamedRegistry> tokenizers = setupTokenizers(plugins); NamedRegistry>> analyzers = setupAnalyzers(plugins); + NamedRegistry>> normalizers = setupNormalizers(plugins); analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(), tokenizers - .getRegistry(), analyzers.getRegistry()); + .getRegistry(), analyzers.getRegistry(), normalizers.getRegistry()); } HunspellService getHunspellService() { @@ -334,6 +335,13 @@ public final class AnalysisModule { return analyzers; } + private NamedRegistry>> setupNormalizers(List plugins) { + NamedRegistry>> normalizers = new NamedRegistry<>("normalizer"); + // TODO: provide built-in normalizer 
providers? + // TODO: pluggability? + return normalizers; + } + private static AnalysisModule.AnalysisProvider requriesAnalysisSettings(AnalysisModule.AnalysisProvider provider) { return new AnalysisModule.AnalysisProvider() { @Override diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index c8607e0af31..25153576b6b 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -435,7 +435,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { assertEquals(ex.getMessage(), "Failed to verify index " + metaData.getIndex()); assertNotNull(ex.getCause()); assertEquals(IllegalArgumentException.class, ex.getCause().getClass()); - assertEquals(ex.getCause().getMessage(), "Unknown tokenfilter type [icu_collation] for [myCollator]"); + assertEquals(ex.getCause().getMessage(), "Unknown filter type [icu_collation] for [myCollator]"); } /** diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 46281c812f1..e62f2178a4f 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -148,7 +148,7 @@ public class IndexModuleTests extends ESTestCase { public void testWrapperIsBound() throws IOException { IndexModule module = new IndexModule(indexSettings, - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.setSearcherWrapper((s) -> new Wrapper()); module.engineFactory.set(new MockEngineFactory(AssertingDirectoryReader.class)); @@ -168,7 +168,7 @@ public class IndexModuleTests extends ESTestCase { .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); IndexModule module = new IndexModule(indexSettings, - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.addIndexStore("foo_store", FooStore::new); try { module.addIndexStore("foo_store", FooStore::new); @@ -193,7 +193,7 @@ public class IndexModuleTests extends ESTestCase { }; IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); IndexModule module = new IndexModule(indexSettings, - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.addIndexEventListener(eventListener); IndexService indexService = newIndexService(module); IndexSettings x = indexService.getIndexSettings(); @@ -208,7 +208,7 @@ public class IndexModuleTests extends ESTestCase { public void testListener() throws IOException { Setting booleanSetting = Setting.boolSetting("index.foo.bar", false, Property.Dynamic, Property.IndexScope); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings, booleanSetting), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); Setting booleanSetting2 = Setting.boolSetting("index.foo.bar.baz", false, Property.Dynamic, 
Property.IndexScope); AtomicBoolean atomicBoolean = new AtomicBoolean(false); module.addSettingsUpdateConsumer(booleanSetting, atomicBoolean::set); @@ -228,7 +228,7 @@ public class IndexModuleTests extends ESTestCase { public void testAddIndexOperationListener() throws IOException { IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); AtomicBoolean executed = new AtomicBoolean(false); IndexingOperationListener listener = new IndexingOperationListener() { @Override @@ -257,7 +257,7 @@ public class IndexModuleTests extends ESTestCase { public void testAddSearchOperationListener() throws IOException { IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); AtomicBoolean executed = new AtomicBoolean(false); SearchOperationListener listener = new SearchOperationListener() { @@ -291,7 +291,7 @@ public class IndexModuleTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.addSimilarity("test_similarity", (string, settings) -> new SimilarityProvider() { @Override public String name() { @@ -315,7 +315,7 @@ public class IndexModuleTests extends ESTestCase { public void testFrozen() { IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.freeze(); String msg = "Can't modify IndexModule once the index service has been created"; assertEquals(msg, expectThrows(IllegalStateException.class, () -> module.addSearchOperationListener(null)).getMessage()); @@ -334,7 +334,7 @@ public class IndexModuleTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); Exception ex = expectThrows(IllegalArgumentException.class, () -> newIndexService(module)); assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage()); } @@ -346,7 +346,7 @@ public class IndexModuleTests extends ESTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); Exception ex = expectThrows(IllegalArgumentException.class, () -> newIndexService(module)); assertEquals("Similarity [my_similarity] 
must have an associated type", ex.getMessage()); } @@ -356,7 +356,7 @@ public class IndexModuleTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.forceQueryCacheProvider((a, b) -> new CustomQueryCache()); expectThrows(AlreadySetException.class, () -> module.forceQueryCacheProvider((a, b) -> new CustomQueryCache())); IndexService indexService = newIndexService(module); @@ -369,7 +369,7 @@ public class IndexModuleTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof IndexQueryCache); indexService.close("simon says", false); @@ -381,7 +381,7 @@ public class IndexModuleTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), - new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap())); + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap())); module.forceQueryCacheProvider((a, b) -> new CustomQueryCache()); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof DisabledQueryCache); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index dedd478e3bf..432ff5247b5 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -65,7 +65,7 @@ public class AnalysisRegistryTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); registry = new AnalysisRegistry(new Environment(settings), - emptyMap(), emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); } public void testDefaultAnalyzers() throws IOException { @@ -76,7 +76,8 @@ public class AnalysisRegistryTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap()) + IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()) .build(idxSettings); assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); 
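        // the fifth emptyMap() above is the new (empty) normalizer provider registry;
        // default analyzer resolution below is unaffected by it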
assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); @@ -88,7 +89,7 @@ public class AnalysisRegistryTests extends ESTestCase { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings), singletonMap("default", analyzerProvider("default")) - , emptyMap(), emptyMap(), emptyMap()); + , emptyMap(), emptyMap(), emptyMap(), emptyMap()); assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); @@ -100,7 +101,7 @@ public class AnalysisRegistryTests extends ESTestCase { AnalyzerProvider defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> registry.build(IndexSettingsModule.newIndexSettings("index", settings), - singletonMap("default_index", defaultIndex), emptyMap(), emptyMap(), emptyMap())); + singletonMap("default_index", defaultIndex), emptyMap(), emptyMap(), emptyMap(), emptyMap())); assertTrue(e.getMessage().contains("[index.analysis.analyzer.default_index] is not supported")); } @@ -109,7 +110,7 @@ public class AnalysisRegistryTests extends ESTestCase { VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1)); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings), - singletonMap("default_index", analyzerProvider("default_index")), emptyMap(), emptyMap(), emptyMap()); + singletonMap("default_index", analyzerProvider("default_index")), emptyMap(), emptyMap(), emptyMap(), emptyMap()); assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); @@ -121,7 +122,7 @@ public class AnalysisRegistryTests extends ESTestCase { Version version = VersionUtils.randomVersion(random()); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings), - singletonMap("default_search", analyzerProvider("default_search")), emptyMap(), emptyMap(), emptyMap()); + singletonMap("default_search", analyzerProvider("default_search")), emptyMap(), emptyMap(), emptyMap(), emptyMap()); assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); @@ -135,7 +136,7 @@ public class AnalysisRegistryTests extends ESTestCase { analyzers.put("default_index", analyzerProvider("default_index")); analyzers.put("default_search", analyzerProvider("default_search")); IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings), - analyzers, 
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); + analyzers, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); @@ -196,10 +197,11 @@ public class AnalysisRegistryTests extends ESTestCase { Settings indexSettings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap()) + IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()) .build(idxSettings); IndexAnalyzers otherIndexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), - emptyMap()).build(idxSettings); + emptyMap(), emptyMap()).build(idxSettings); final int numIters = randomIntBetween(5, 20); for (int i = 0; i < numIters; i++) { PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(random(), PreBuiltAnalyzers.values()); @@ -219,7 +221,8 @@ public class AnalysisRegistryTests extends ESTestCase { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap()).build(idxSettings)); + () -> new AnalysisRegistry(new Environment(settings), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()).build(idxSettings)); assertThat(e.getMessage(), equalTo("analyzer [test_analyzer] must specify either an analyzer type, or a tokenizer")); } @@ -228,7 +231,8 @@ public class AnalysisRegistryTests extends ESTestCase { Settings indexSettings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap()) + IndexAnalyzers indexAnalyzers = new AnalysisRegistry(new Environment(settings), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()) .build(idxSettings); indexAnalyzers.close(); indexAnalyzers.close(); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java new file mode 100644 index 00000000000..3e71a609737 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ESTokenStreamTestCase; + +import java.io.IOException; + +public class CustomNormalizerTests extends ESTokenStreamTestCase { + + public void testBasics() throws IOException { + Settings settings = Settings.builder() + .putArray("index.analysis.normalizer.my_normalizer.filter", "lowercase", "asciifolding") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + assertNull(analysis.indexAnalyzers.get("my_normalizer")); + NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer"); + assertNotNull(normalizer); + assertEquals("my_normalizer", normalizer.name()); + assertTokenStreamContents(normalizer.tokenStream("foo", "Cet été-là"), new String[] {"cet ete-la"}); + assertEquals(new BytesRef("cet ete-la"), normalizer.normalize("foo", "Cet été-là")); + } + + public void testUnknownType() { + Settings settings = Settings.builder() + .put("index.analysis.normalizer.my_normalizer.type", "foobar") + .putArray("index.analysis.normalizer.my_normalizer.filter", "lowercase", "asciifolding") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings)); + assertEquals("Unknown normalizer type [foobar] for [my_normalizer]", e.getMessage()); + } + + public void testTokenizer() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.normalizer.my_normalizer.tokenizer", "keyword") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings)); + assertEquals("Custom normalizer [my_normalizer] cannot configure a tokenizer", e.getMessage()); + } + + public void testCharFilters() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.char_filter.my_mapping.type", "mapping") + .putArray("index.analysis.char_filter.my_mapping.mappings", "a => z") + .putArray("index.analysis.normalizer.my_normalizer.char_filter", "my_mapping") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + assertNull(analysis.indexAnalyzers.get("my_normalizer")); + NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer"); + assertNotNull(normalizer); + assertEquals("my_normalizer", normalizer.name()); + assertTokenStreamContents(normalizer.tokenStream("foo", "abc"), new String[] {"zbc"}); + assertEquals(new BytesRef("zbc"), normalizer.normalize("foo", "abc")); + } 
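+
+    // Minimal sketch (a hypothetical extra test, assuming the mapping char filter and
+    // lowercase filter wiring used above): char filters run on the raw input first,
+    // then token filters run on the single keyword token.
+    public void testCharFilterAndTokenFilter() throws IOException {
+        Settings settings = Settings.builder()
+            .put("index.analysis.char_filter.my_mapping.type", "mapping")
+            .putArray("index.analysis.char_filter.my_mapping.mappings", "a => z")
+            .putArray("index.analysis.normalizer.my_normalizer.char_filter", "my_mapping")
+            .putArray("index.analysis.normalizer.my_normalizer.filter", "lowercase")
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+        NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer");
+        // only the literal lowercase 'a' is mapped to 'z'; lowercasing happens afterwards
+        assertTokenStreamContents(normalizer.tokenStream("foo", "aBC"), new String[] {"zbc"});
+        assertEquals(new BytesRef("zbc"), normalizer.normalize("foo", "aBC"));
+    }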
+ + public void testIllegalFilters() throws IOException { + Settings settings = Settings.builder() + .putArray("index.analysis.normalizer.my_normalizer.filter", "porter_stem") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings)); + assertEquals("Custom normalizer [my_normalizer] may not use filter [porter_stem]", e.getMessage()); + } + + public void testIllegalCharFilters() throws IOException { + Settings settings = Settings.builder() + .putArray("index.analysis.normalizer.my_normalizer.char_filter", "html_strip") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings)); + assertEquals("Custom normalizer [my_normalizer] may not use char filter [html_strip]", e.getMessage()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 8816baceb00..f0ca8292f4f 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2313,7 +2313,7 @@ public class InternalEngineTests extends ESTestCase { Index index = new Index(indexName, "_na_"); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap(), Collections.emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry, diff --git a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 6d3c5bcbce1..bffe58db3a6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -25,8 +25,10 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -51,7 +53,11 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { @Before public void setup() { - indexService = createIndex("test"); + indexService = createIndex("test", Settings.builder() + 
.put("index.analysis.normalizer.my_lowercase.type", "custom") + .putArray("index.analysis.normalizer.my_lowercase.filter", "lowercase") + .put("index.analysis.normalizer.my_asciifolding.type", "custom") + .putArray("index.analysis.normalizer.my_asciifolding.filter", "asciifolding").build()); parser = indexService.mapperService().documentMapperParser(); } @@ -283,6 +289,62 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { assertFalse(fields[0].fieldType().omitNorms()); } + public void testNormalizer() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "keyword").field("normalizer", "my_lowercase").endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "AbC") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + + assertEquals(new BytesRef("abc"), fields[0].binaryValue()); + IndexableFieldType fieldType = fields[0].fieldType(); + assertThat(fieldType.omitNorms(), equalTo(true)); + assertFalse(fieldType.tokenized()); + assertFalse(fieldType.stored()); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); + assertThat(fieldType.storeTermVectors(), equalTo(false)); + assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); + assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); + assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); + assertEquals(DocValuesType.NONE, fieldType.docValuesType()); + + assertEquals(new BytesRef("abc"), fields[1].binaryValue()); + fieldType = fields[1].fieldType(); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.NONE)); + assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType()); + } + + public void testUpdateNormalizer() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "keyword").field("normalizer", "my_lowercase").endObject().endObject() + .endObject().endObject().string(); + indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + + String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "keyword").field("normalizer", "my_asciifolding").endObject().endObject() + .endObject().endObject().string(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> indexService.mapperService().merge("type", + new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean())); + assertEquals( + "Mapper for [field] conflicts with existing mapping in other types:\n[mapper [field] has different [normalizer]]", + e.getMessage()); + } + public void testEmptyName() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject() .startObject("type") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index fbbabf8ee3a..00eecc669f8 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -20,22 +20,41 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.LowerCaseFilter; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.index.mapper.KeywordFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; +import org.junit.Before; import java.io.IOException; import java.util.Arrays; public class KeywordFieldTypeTests extends FieldTypeTestCase { + + @Before + public void setupProperties() { + addModifier(new Modifier("normalizer", false) { + @Override + public void modify(MappedFieldType ft) { + ((KeywordFieldType) ft).setNormalizer(Lucene.KEYWORD_ANALYZER); + } + }); + } + @Override protected MappedFieldType createDefaultFieldType() { return new KeywordFieldMapper.KeywordFieldType(); @@ -62,6 +81,31 @@ public class KeywordFieldTypeTests extends FieldTypeTestCase { assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } + public void testTermQueryWithNormalizer() { + MappedFieldType ft = createDefaultFieldType(); + ft.setName("field"); + ft.setIndexOptions(IndexOptions.DOCS); + Analyzer normalizer = new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer in = new WhitespaceTokenizer(); + TokenFilter out = new LowerCaseFilter(in); + return new TokenStreamComponents(in, out); + } + @Override + protected TokenStream normalize(String fieldName, TokenStream in) { + return new LowerCaseFilter(in); + } + }; + ft.setSearchAnalyzer(new NamedAnalyzer("my_normalizer", AnalyzerScope.INDEX, normalizer)); + assertEquals(new TermQuery(new Term("field", "foo bar")), ft.termQuery("fOo BaR", null)); + + ft.setIndexOptions(IndexOptions.NONE); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> ft.termQuery("bar", null)); + assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + } + public void testTermsQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index 2b3aad750dd..225940d8eda 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -101,7 +101,7 @@ public class ParentFieldMapperTests extends ESSingleNodeTestCase { IndexSettings indexSettings = 
IndexSettingsModule.newIndexSettings(index, Settings.EMPTY); NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer, - Collections.emptyMap()); + Collections.emptyMap(), Collections.emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry(), similarityService, new IndicesModule(emptyList()).getMapperRegistry(), () -> null); diff --git a/docs/reference/analysis.asciidoc b/docs/reference/analysis.asciidoc index 42b1ef65d24..a8299f45e19 100644 --- a/docs/reference/analysis.asciidoc +++ b/docs/reference/analysis.asciidoc @@ -112,6 +112,8 @@ include::analysis/testing.asciidoc[] include::analysis/analyzers.asciidoc[] +include::analysis/normalizers.asciidoc[] + include::analysis/tokenizers.asciidoc[] include::analysis/tokenfilters.asciidoc[] diff --git a/docs/reference/analysis/normalizers.asciidoc b/docs/reference/analysis/normalizers.asciidoc new file mode 100644 index 00000000000..0287f140c74 --- /dev/null +++ b/docs/reference/analysis/normalizers.asciidoc @@ -0,0 +1,57 @@ +[[analysis-normalizers]] +== Normalizers + +experimental[] + +Normalizers are similar to analyzers except that they may only emit a single +token. As a consequence, they do not have a tokenizer and only accept a subset +of the available char filters and token filters. Only the filters that work on +a per-character basis are allowed. For instance a lowercasing filter would be +allowed, but not a stemming filter, which needs to look at the keyword as a +whole. + +[float] +=== Custom analyzers + +Elasticsearch does not ship with built-in normalizers so far, so the only way +to get one is by building a custom one. Custom normalizers take a list of char +<> and a list of +<>. + +[source,js] +-------------------------------- +PUT index +{ + "settings": { + "analysis": { + "char_filter": { + "quote": { + "type": "mapping", + "mappings": [ + "« => \"", + "» => \"" + ] + } + }, + "normalizer": { + "my_normalizer": { + "type": "custom", + "char_filter": ["quote"], + "filter": ["lowercase", "asciifolding"] + } + } + } + }, + "mappings": { + "type": { + "properties": { + "foo": { + "type": "keyword", + "normalizer": "my_normalizer" + } + } + } + } +} +-------------------------------- +// CONSOLE diff --git a/docs/reference/mapping/params.asciidoc b/docs/reference/mapping/params.asciidoc index e7d2d7ac0c8..24220356233 100644 --- a/docs/reference/mapping/params.asciidoc +++ b/docs/reference/mapping/params.asciidoc @@ -8,6 +8,7 @@ parameters that are used by <>: The following mapping parameters are common to some or all field datatypes: * <> +* <> * <> * <> * <> @@ -34,6 +35,8 @@ The following mapping parameters are common to some or all field datatypes: include::params/analyzer.asciidoc[] +include::params/normalizer.asciidoc[] + include::params/boost.asciidoc[] include::params/coerce.asciidoc[] diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc new file mode 100644 index 00000000000..c0636763e52 --- /dev/null +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -0,0 +1,163 @@ +[[normalizer]] +=== `normalizer` + +The `normalizer` property of <> fields is similar to +<> except that it guarantees that the analysis chain +produces a single token. 
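+
+NOTE: If the chain emits zero tokens or more than one token for a value, indexing
+the document fails with an `IllegalStateException`; this is enforced by
+`KeywordFieldMapper` when it normalizes the value.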
+ +The `normalizer` is applied prior to indexing the keyword, as well as at +search-time when the `keyword` field is searched via a query parser such as +the <> query. + +[source,js] +-------------------------------- +PUT index +{ + "settings": { + "analysis": { + "normalizer": { + "my_normalizer": { + "type": "custom", + "char_filter": [], + "filter": ["lowercase", "asciifolding"] + } + } + } + }, + "mappings": { + "type": { + "properties": { + "foo": { + "type": "keyword", + "normalizer": "my_normalizer" + } + } + } + } +} + +PUT index/type/1 +{ + "foo": "BÀR" +} + +PUT index/type/2 +{ + "foo": "bar" +} + +PUT index/type/3 +{ + "foo": "baz" +} + +POST index/_refresh + +GET index/_search +{ + "query": { + "match": { + "foo": "BAR" + } + } +} +-------------------------------- +// CONSOLE + +The above query matches documents 1 and 2 since `BÀR` is converted to `bar` at +both index and query time. + +[source,js] +---------------------------- +{ + "took": $body.took, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 2, + "max_score": 0.2876821, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "2", + "_score": 0.2876821, + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.2876821, + "_source": { + "foo": "BÀR" + } + } + ] + } +} +---------------------------- +// TESTRESPONSE[s/"took".*/"took": "$body.took",/] + +Also, the fact that keywords are converted prior to indexing also means that +aggregations return normalized values: + +[source,js] +---------------------------- +GET index/_search +{ + "size": 0, + "aggs": { + "foo_terms": { + "terms": { + "field": "foo" + } + } + } +} +-------------------------------- +// CONSOLE +// TEST[continued] + +returns + +[source,js] +---------------------------- +{ + "took": 43, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 3, + "max_score": 0.0, + "hits": [] + }, + "aggregations": { + "foo_terms": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + { + "key": "bar", + "doc_count": 2 + }, + { + "key": "baz", + "doc_count": 1 + } + ] + } + } +} +---------------------------- +// TESTRESPONSE[s/"took".*/"took": "$body.took",/] diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index 7c09ef46e55..316c92a73e6 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -109,6 +109,12 @@ The following parameters are accepted by `keyword` fields: Which scoring algorithm or _similarity_ should be used. Defaults to `classic`, which uses TF/IDF. +<>:: + + experimental[] + How to pre-process the keyword prior to indexing. Defaults to `null`, + meaning the keyword is kept as-is. + NOTE: Indexes imported from 2.x do not support `keyword`. Instead they will attempt to downgrade `keyword` into `string`. This allows you to merge modern mappings with legacy mappings. Long lived indexes will have to be recreated From f89bb18a5de6b7cfd6bc2dd589eb771efe3aec5c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 30 Dec 2016 09:48:24 +0100 Subject: [PATCH 039/119] Dynamic `date` fields should use the `format` that was used to detect it is a date. (#22174) Unless the dynamic templates define an explicit format in the mapping definition: in that case the explicit mapping should have precedence. 
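An illustrative case, mirroring the new unit test: with `dynamic_date_formats`
set to `["yyyy-MM-dd"]`, a value like "2016-11-20" that is detected as a date is
now mapped with format `yyyy-MM-dd` instead of the default
`strict_date_optional_time||epoch_millis`, while a dynamic template that sets an
explicit `format` (e.g. `yyyy-MM-dd||epoch_millis`) keeps that explicit format.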
Closes #9410 --- .../index/mapper/DateFieldMapper.java | 7 +++ .../index/mapper/DocumentParser.java | 6 +++ .../index/mapper/DynamicMappingTests.java | 53 +++++++++++++++++++ 3 files changed, 66 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index ce03fc0a6b4..d2833d4bfb3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -70,6 +70,7 @@ public class DateFieldMapper extends FieldMapper { private Boolean ignoreMalformed; private Locale locale; + private boolean dateTimeFormatterSet = false; public Builder(String name) { super(name, new DateFieldType(), new DateFieldType()); @@ -97,8 +98,14 @@ public class DateFieldMapper extends FieldMapper { return Defaults.IGNORE_MALFORMED; } + /** Whether an explicit format for this date field has been set already. */ + public boolean isDateTimeFormatterSet() { + return dateTimeFormatterSet; + } + public Builder dateTimeFormatter(FormatDateTimeFormatter dateTimeFormatter) { fieldType().setDateTimeFormatter(dateTimeFormatter); + dateTimeFormatterSet = true; return this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index d7f32f4663b..a35b06a06ad 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -694,6 +694,12 @@ final class DocumentParser { if (builder == null) { builder = newDateBuilder(currentFieldName, dateTimeFormatter, Version.indexCreated(context.indexSettings())); } + if (builder instanceof DateFieldMapper.Builder) { + DateFieldMapper.Builder dateBuilder = (DateFieldMapper.Builder) builder; + if (dateBuilder.isDateTimeFormatterSet() == false) { + dateBuilder.dateTimeFormatter(dateTimeFormatter); + } + } return builder; } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 28a24f67ac9..f5c8d38503e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -644,6 +644,59 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { assertThat(mapper, instanceOf(TextFieldMapper.class)); } + public void testDateDetectionInheritsFormat() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startArray("dynamic_date_formats") + .value("yyyy-MM-dd") + .endArray() + .startArray("dynamic_templates") + .startObject() + .startObject("dates") + .field("match_mapping_type", "date") + .field("match", "*2") + .startObject("mapping") + .endObject() + .endObject() + .endObject() + .startObject() + .startObject("dates") + .field("match_mapping_type", "date") + .field("match", "*3") + .startObject("mapping") + .field("format", "yyyy-MM-dd||epoch_millis") + .endObject() + .endObject() + .endObject() + .endArray() + .endObject().endObject().string(); + + IndexService index = createIndex("test"); + client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get(); + DocumentMapper defaultMapper = index.mapperService().documentMapper("type"); + + ParsedDocument doc = defaultMapper.parse("test", "type", "1", 
XContentFactory.jsonBuilder() + .startObject() + .field("date1", "2016-11-20") + .field("date2", "2016-11-20") + .field("date3", "2016-11-20") + .endObject() + .bytes()); + assertNotNull(doc.dynamicMappingsUpdate()); + assertAcked(client().admin().indices().preparePutMapping("test").setType("type").setSource(doc.dynamicMappingsUpdate().toString()).get()); + + defaultMapper = index.mapperService().documentMapper("type"); + + DateFieldMapper dateMapper1 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper("date1"); + DateFieldMapper dateMapper2 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper("date2"); + DateFieldMapper dateMapper3 = (DateFieldMapper) defaultMapper.mappers().smartNameFieldMapper("date3"); + // inherited from dynamic date format + assertEquals("yyyy-MM-dd", dateMapper1.fieldType().dateTimeFormatter().format()); + // inherited from dynamic date format since the mapping in the template did not specify a format + assertEquals("yyyy-MM-dd", dateMapper2.fieldType().dateTimeFormatter().format()); + // not inherited from the dynamic date format since the template defined an explicit format + assertEquals("yyyy-MM-dd||epoch_millis", dateMapper3.fieldType().dateTimeFormatter().format()); + } + public void testDynamicTemplateOrder() throws IOException { // https://github.com/elastic/elasticsearch/issues/18625 // elasticsearch used to apply templates that do not have a match_mapping_type first From f1d77219328c680703fb3a1953eb6dca691e2b2f Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 30 Dec 2016 10:12:24 +0100 Subject: [PATCH 040/119] Fix TermsAggregatorTests to not use LuceneTestCase.newSearcher since it needs a DirectoryReader. --- .../search/aggregations/bucket/terms/TermsAggregatorTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 60613e26d72..6f5a54965e3 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -52,7 +52,8 @@ public class TermsAggregatorTests extends AggregatorTestCase { indexWriter.close(); IndexReader indexReader = DirectoryReader.open(directory); - IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + // We do not use LuceneTestCase.newSearcher because we need a DirectoryReader + IndexSearcher indexSearcher = new IndexSearcher(indexReader); for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) From 00de5b83bdef6617e38a14449c879dd3a3f45ed8 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 30 Dec 2016 11:18:02 +0100 Subject: [PATCH 041/119] The percentage of deleted docs needs to be strictly over 10% for deleted docs to be expunged. 
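A concrete case: with numDocs = 20, the old lower bound was
(int) Math.ceil(20 / 10.0) = 2 deleted documents, which is exactly 10% and thus
not strictly over the threshold; Math.nextUp nudges the quotient just above 2.0,
so the bound becomes 3 deleted documents, i.e. strictly more than 10%.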
---
 .../java/org/elasticsearch/index/shard/IndexShardTests.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index ed81c237677..d9d6015e422 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -1371,7 +1371,7 @@ public class IndexShardTests extends IndexShardTestCase {
         final long numDocs = randomIntBetween(2, 32); // at least two documents so we have docs to delete
         // Delete at least numDocs/10 documents otherwise the number of deleted docs will be below 10%
         // and forceMerge will refuse to expunge deletes
-        final long numDocsToDelete = randomIntBetween((int) Math.ceil(numDocs / 10.0), Math.toIntExact(numDocs));
+        final long numDocsToDelete = randomIntBetween((int) Math.ceil(Math.nextUp(numDocs / 10.0)), Math.toIntExact(numDocs));
         for (int i = 0; i < numDocs; i++) {
             final String id = Integer.toString(i);
             final ParsedDocument doc =

From eaefb5f99b7a2a727f5f411207ed9e688b9a141d Mon Sep 17 00:00:00 2001
From: Dave Richardson
Date: Fri, 30 Dec 2016 12:37:13 -0500
Subject: [PATCH 042/119] Descriptions for scripting enabled (#22388)

The `Script source settings` section currently states that `false` means
scripting is ENABLED. The other sections seem to indicate that `false`
means scripting is DISABLED. If the current documentation is correct, that
would imply that `inline` and `stored` scripting are ENABLED by default,
which seems to conflict with all the other sections in the document.
---
 docs/reference/modules/scripting/security.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/modules/scripting/security.asciidoc
index 3b40937c2d8..110e65e52c8 100644
--- a/docs/reference/modules/scripting/security.asciidoc
+++ b/docs/reference/modules/scripting/security.asciidoc
@@ -51,8 +51,8 @@ Each of these settings takes one of these values:
 
 [horizontal]
-`false`:: Scripting is enabled.
-`true`:: Scripting is disabled.
+`false`:: Scripting is disabled.
+`true`:: Scripting is enabled.
The default values are the following: From 45d010e8746d420908d3651dce7fadc2ee7532c7 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 30 Dec 2016 15:15:00 +0100 Subject: [PATCH 043/119] Remove some usages of ParseFieldMatcher in favour of using ParseField directly Relates to #19552 Relates to #22130 --- .../functionscore/DecayFunctionParser.java | 2 +- .../filters/FiltersAggregationBuilder.java | 8 +- .../nested/NestedAggregationBuilder.java | 2 +- .../sampler/SamplerAggregationBuilder.java | 2 +- .../heuristics/ScriptHeuristic.java | 2 +- .../ScriptedMetricAggregationBuilder.java | 10 +-- .../tophits/TopHitsAggregationBuilder.java | 36 ++++----- .../aggregations/pipeline/BucketHelpers.java | 2 +- .../bucketmetrics/BucketMetricsParser.java | 8 +- ...tilesBucketPipelineAggregationBuilder.java | 2 +- .../extended/ExtendedStatsBucketParser.java | 2 +- ...ucketScriptPipelineAggregationBuilder.java | 14 ++-- ...ketSelectorPipelineAggregationBuilder.java | 12 +-- ...mulativeSumPipelineAggregationBuilder.java | 6 +- .../DerivativePipelineAggregationBuilder.java | 10 +-- .../MovAvgPipelineAggregationBuilder.java | 18 ++--- .../SerialDiffPipelineAggregationBuilder.java | 10 +-- .../search/builder/SearchSourceBuilder.java | 76 +++++++++---------- .../support/MultiValuesSourceParser.java | 4 +- 19 files changed, 113 insertions(+), 113 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java index 2c8b9af28d6..5a767ecc3a0 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java @@ -112,7 +112,7 @@ public final class DecayFunctionParser> im XContentBuilder builder = XContentFactory.jsonBuilder(); builder.copyCurrentStructure(parser); functionBytes = builder.bytes(); - } else if (context.getParseFieldMatcher().match(currentFieldName, MULTI_VALUE_MODE)) { + } else if (MULTI_VALUE_MODE.match(currentFieldName)) { multiValueMode = MultiValueMode.fromString(parser.text()); } else { throw new ParsingException(parser.getTokenLocation(), "malformed score function score parameters."); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java index 10bbc03dd46..b970d809020 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregationBuilder.java @@ -217,21 +217,21 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder(); String key = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -247,7 +247,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { QueryBuilder filter = context.parseInnerQueryBuilder(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index dbb08e792ad..1e70da265a7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -116,7 +116,7 @@ public class NestedAggregationBuilder extends AbstractAggregationBuilder scriptFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { String scriptFieldName = parser.currentName(); @@ -641,7 +641,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> sorts = SortBuilder.fromXContent(context); factory.sorts(sorts); } else { @@ -682,10 +682,10 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder fieldDataFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -696,10 +696,10 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> sorts = SortBuilder.fromXContent(context); factory.sorts(sorts); - } else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) { + } else if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName)) { factory.fetchSource(FetchSourceContext.parse(context.parser())); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java index 98b5b67b7cf..d2a53ca343b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java @@ -65,7 +65,7 @@ public class BucketHelpers { public static GapPolicy parse(QueryParseContext context, String text, XContentLocation tokenLocation) { GapPolicy result = null; for (GapPolicy policy : values()) { - if (context.getParseFieldMatcher().match(text, policy.parseField)) { + if (policy.parseField.match(text)) { if (result == null) { result = policy; } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java index 9dee002ca29..e7954174aa3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsParser.java @@ -58,17 +58,17 @@ public abstract class BucketMetricsParser implements PipelineAggregator.Parser { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, FORMAT)) { + if (FORMAT.match(currentFieldName)) { format = parser.text(); - } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; - } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) { + } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); } else { parseToken(pipelineAggregatorName, parser, context, currentFieldName, token, params); } } else if (token == XContentParser.Token.START_ARRAY) { - if 
(context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java index 81df16f2bf7..cea7d011367 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/PercentilesBucketPipelineAggregationBuilder.java @@ -138,7 +138,7 @@ public class PercentilesBucketPipelineAggregationBuilder @Override protected boolean token(XContentParser parser, QueryParseContext context, String field, XContentParser.Token token, Map params) throws IOException { - if (context.getParseFieldMatcher().match(field, PERCENTS_FIELD) && token == XContentParser.Token.START_ARRAY) { + if (PERCENTS_FIELD.match(field) && token == XContentParser.Token.START_ARRAY) { DoubleArrayList percents = new DoubleArrayList(10); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { percents.add(parser.doubleValue()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java index b7fa49267dc..dfa28c3dd27 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/stats/extended/ExtendedStatsBucketParser.java @@ -46,7 +46,7 @@ public class ExtendedStatsBucketParser extends BucketMetricsParser { @Override protected boolean token(XContentParser parser, QueryParseContext context, String field, XContentParser.Token token, Map params) throws IOException { - if (context.getParseFieldMatcher().match(field, SIGMA) && token == XContentParser.Token.VALUE_NUMBER) { + if (SIGMA.match(field) && token == XContentParser.Token.VALUE_NUMBER) { params.put(SIGMA.getPreferredName(), parser.doubleValue()); return true; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java index e04f26654ea..320a84c786b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregationBuilder.java @@ -170,21 +170,21 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, FORMAT)) { + if (FORMAT.match(currentFieldName)) { format = parser.text(); - } else if (context.getParseFieldMatcher().match(currentFieldName, 
BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPathsMap = new HashMap<>(); bucketsPathsMap.put("_value", parser.text()); - } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) { + } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -199,9 +199,9 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); - } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { Map map = parser.map(); bucketsPathsMap = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java index 877be6ea54f..8a88f011756 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java @@ -135,19 +135,19 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { bucketsPathsMap = new HashMap<>(); bucketsPathsMap.put("_value", parser.text()); - } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) { + } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if 
(context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -162,9 +162,9 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); - } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { Map map = parser.map(); bucketsPathsMap = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java index a83787f1365..5ac185990b4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregationBuilder.java @@ -141,16 +141,16 @@ public class CumulativeSumPipelineAggregationBuilder extends AbstractPipelineAgg if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, FORMAT)) { + if (FORMAT.match(currentFieldName)) { format = parser.text(); - } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java index ded2d110206..bb2a1c01a50 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java @@ -207,20 +207,20 @@ public class DerivativePipelineAggregationBuilder extends AbstractPipelineAggreg if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, FORMAT_FIELD)) { + if (FORMAT_FIELD.match(currentFieldName)) { format = parser.text(); - } else if 
(context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH_FIELD)) { + } else if (BUCKETS_PATH_FIELD.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; - } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY_FIELD)) { + } else if (GAP_POLICY_FIELD.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else if (context.getParseFieldMatcher().match(currentFieldName, UNIT_FIELD)) { + } else if (UNIT_FIELD.match(currentFieldName)) { units = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH_FIELD)) { + if (BUCKETS_PATH_FIELD.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java index f0aa1f81126..dc445093f1d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java @@ -322,13 +322,13 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (context.getParseFieldMatcher().match(currentFieldName, WINDOW)) { + if (WINDOW.match(currentFieldName)) { window = parser.intValue(); if (window <= 0) { throw new ParsingException(parser.getTokenLocation(), "[" + currentFieldName + "] value must be a positive, " + "non-zero integer. Value supplied was [" + predict + "] in [" + pipelineAggregatorName + "]."); } - } else if (context.getParseFieldMatcher().match(currentFieldName, PREDICT)) { + } else if (PREDICT.match(currentFieldName)) { predict = parser.intValue(); if (predict <= 0) { throw new ParsingException(parser.getTokenLocation(), "[" + currentFieldName + "] value must be a positive integer." 
@@ -339,20 +339,20 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, FORMAT)) { + if (FORMAT.match(currentFieldName)) { format = parser.text(); - } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; - } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) { + } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else if (context.getParseFieldMatcher().match(currentFieldName, MODEL)) { + } else if (MODEL.match(currentFieldName)) { model = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); @@ -364,14 +364,14 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, SETTINGS)) { + if (SETTINGS.match(currentFieldName)) { settings = parser.map(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - if (context.getParseFieldMatcher().match(currentFieldName, MINIMIZE)) { + if (MINIMIZE.match(currentFieldName)) { minimize = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java index f20a4f8da42..0acd4c7f1b7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/serialdiff/SerialDiffPipelineAggregationBuilder.java @@ -162,18 +162,18 @@ public class SerialDiffPipelineAggregationBuilder extends AbstractPipelineAggreg if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (context.getParseFieldMatcher().match(currentFieldName, FORMAT)) { + if (FORMAT.match(currentFieldName)) { format = parser.text(); - } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; - } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) { + } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, 
parser.text(), parser.getTokenLocation()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (context.getParseFieldMatcher().match(currentFieldName, LAG)) { + if (LAG.match(currentFieldName)) { lag = parser.intValue(true); if (lag <= 0) { throw new ParsingException(parser.getTokenLocation(), @@ -186,7 +186,7 @@ public class SerialDiffPipelineAggregationBuilder extends AbstractPipelineAggreg "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { + if (BUCKETS_PATH.match(currentFieldName)) { List paths = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String path = parser.text(); diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index f7ffcfea137..b7ac57be43e 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -931,32 +931,32 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (context.getParseFieldMatcher().match(currentFieldName, FROM_FIELD)) { + if (FROM_FIELD.match(currentFieldName)) { from = parser.intValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, SIZE_FIELD)) { + } else if (SIZE_FIELD.match(currentFieldName)) { size = parser.intValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) { + } else if (TIMEOUT_FIELD.match(currentFieldName)) { timeout = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()); - } else if (context.getParseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) { + } else if (TERMINATE_AFTER_FIELD.match(currentFieldName)) { terminateAfter = parser.intValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) { + } else if (MIN_SCORE_FIELD.match(currentFieldName)) { minScore = parser.floatValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, VERSION_FIELD)) { + } else if (VERSION_FIELD.match(currentFieldName)) { version = parser.booleanValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, EXPLAIN_FIELD)) { + } else if (EXPLAIN_FIELD.match(currentFieldName)) { explain = parser.booleanValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) { + } else if (TRACK_SCORES_FIELD.match(currentFieldName)) { trackScores = parser.booleanValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) { + } else if (_SOURCE_FIELD.match(currentFieldName)) { fetchSourceContext = FetchSourceContext.parse(context.parser()); - } else if (context.getParseFieldMatcher().match(currentFieldName, STORED_FIELDS_FIELD)) { + } else if (STORED_FIELDS_FIELD.match(currentFieldName)) { storedFieldsContext = StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context); - } else if (context.getParseFieldMatcher().match(currentFieldName, 
SORT_FIELD)) { + } else if (SORT_FIELD.match(currentFieldName)) { sort(parser.text()); - } else if (context.getParseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) { + } else if (PROFILE_FIELD.match(currentFieldName)) { profile = parser.booleanValue(); - } else if (context.getParseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + } else if (FIELDS_FIELD.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "Deprecated field [" + SearchSourceBuilder.FIELDS_FIELD + "] used, expected [" + SearchSourceBuilder.STORED_FIELDS_FIELD + "] instead"); @@ -965,18 +965,18 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) { + if (QUERY_FIELD.match(currentFieldName)) { queryBuilder = context.parseInnerQueryBuilder(); - } else if (context.getParseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) { + } else if (POST_FILTER_FIELD.match(currentFieldName)) { postQueryBuilder = context.parseInnerQueryBuilder(); - } else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) { + } else if (_SOURCE_FIELD.match(currentFieldName)) { fetchSourceContext = FetchSourceContext.parse(context.parser()); - } else if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) { + } else if (SCRIPT_FIELDS_FIELD.match(currentFieldName)) { scriptFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { scriptFields.add(new ScriptField(context)); } - } else if (context.getParseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) { + } else if (INDICES_BOOST_FIELD.match(currentFieldName)) { DEPRECATION_LOGGER.deprecated( "Object format in indices_boost is deprecated, please use array format instead"); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -989,19 +989,19 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ " in [" + currentFieldName + "].", parser.getTokenLocation()); } } - } else if (context.getParseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD) - || context.getParseFieldMatcher().match(currentFieldName, AGGS_FIELD)) { + } else if (AGGREGATIONS_FIELD.match(currentFieldName) + || AGGS_FIELD.match(currentFieldName)) { aggregations = aggParsers.parseAggregators(context); - } else if (context.getParseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) { + } else if (HIGHLIGHT_FIELD.match(currentFieldName)) { highlightBuilder = HighlightBuilder.fromXContent(context); - } else if (context.getParseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) { + } else if (SUGGEST_FIELD.match(currentFieldName)) { suggestBuilder = SuggestBuilder.fromXContent(context, suggesters); - } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) { + } else if (SORT_FIELD.match(currentFieldName)) { sorts = new ArrayList<>(SortBuilder.fromXContent(context)); - } else if (context.getParseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) { + } else if (RESCORE_FIELD.match(currentFieldName)) { rescoreBuilders = new ArrayList<>(); rescoreBuilders.add(RescoreBuilder.parseFromXContent(context)); - } else if (context.getParseFieldMatcher().match(currentFieldName, EXT_FIELD)) { + } else if (EXT_FIELD.match(currentFieldName)) { extBuilders = new ArrayList<>(); String extSectionName = null; while ((token = 
parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -1019,16 +1019,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ extBuilders.add(searchExtBuilder); } } - } else if (context.getParseFieldMatcher().match(currentFieldName, SLICE)) { + } else if (SLICE.match(currentFieldName)) { sliceBuilder = SliceBuilder.fromXContent(context); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, STORED_FIELDS_FIELD)) { + if (STORED_FIELDS_FIELD.match(currentFieldName)) { storedFieldsContext = StoredFieldsContext.fromXContent(STORED_FIELDS_FIELD.getPreferredName(), context); - } else if (context.getParseFieldMatcher().match(currentFieldName, DOCVALUE_FIELDS_FIELD)) { + } else if (DOCVALUE_FIELDS_FIELD.match(currentFieldName)) { docValueFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -1038,18 +1038,18 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); } } - } else if (context.getParseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) { + } else if (INDICES_BOOST_FIELD.match(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { indexBoosts.add(new IndexBoost(context)); } - } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) { + } else if (SORT_FIELD.match(currentFieldName)) { sorts = new ArrayList<>(SortBuilder.fromXContent(context)); - } else if (context.getParseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) { + } else if (RESCORE_FIELD.match(currentFieldName)) { rescoreBuilders = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { rescoreBuilders.add(RescoreBuilder.parseFromXContent(context)); } - } else if (context.getParseFieldMatcher().match(currentFieldName, STATS_FIELD)) { + } else if (STATS_FIELD.match(currentFieldName)) { stats = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -1059,11 +1059,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); } } - } else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) { + } else if (_SOURCE_FIELD.match(currentFieldName)) { fetchSourceContext = FetchSourceContext.parse(context.parser()); - } else if (context.getParseFieldMatcher().match(currentFieldName, SEARCH_AFTER)) { + } else if (SEARCH_AFTER.match(currentFieldName)) { searchAfterBuilder = SearchAfterBuilder.fromXContent(parser, context.getParseFieldMatcher()); - } else if (context.getParseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + } else if (FIELDS_FIELD.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "The field [" + SearchSourceBuilder.FIELDS_FIELD + "] is no longer supported, please use [" + SearchSourceBuilder.STORED_FIELDS_FIELD + "] to retrieve stored fields or _source filtering " + @@ -1341,16 +1341,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (token == 
XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) { + if (SCRIPT_FIELD.match(currentFieldName)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); - } else if (context.getParseFieldMatcher().match(currentFieldName, IGNORE_FAILURE_FIELD)) { + } else if (IGNORE_FAILURE_FIELD.match(currentFieldName)) { ignoreFailure = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELD)) { + if (SCRIPT_FIELD.match(currentFieldName)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java index 530c13ca2ce..4311f975c1d 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java @@ -111,7 +111,7 @@ public abstract class MultiValuesSourceParser implement while (parser.nextToken() != XContentParser.Token.END_OBJECT) { parseMissingAndAdd(aggregationName, currentFieldName, parser, missingMap); } - } else if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + "Multi-field aggregations do not support scripts."); @@ -121,7 +121,7 @@ public abstract class MultiValuesSourceParser implement "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. 
" + "Multi-field aggregations do not support scripts."); From 6c54cbade4f8510370a94d572379e2bc7d519faa Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 30 Dec 2016 15:24:57 +0100 Subject: [PATCH 044/119] Remove some more usages of ParseFieldMatcher in favour of using ParseField directly Relates to #19552 Relates to #22130 --- .../index/query/BoolQueryBuilder.java | 12 ++-- .../index/query/BoostingQueryBuilder.java | 10 ++-- .../index/query/CommonTermsQueryBuilder.java | 24 ++++---- .../query/ConstantScoreQueryBuilder.java | 6 +- .../index/query/DisMaxQueryBuilder.java | 10 ++-- .../index/query/ExistsQueryBuilder.java | 6 +- .../query/FieldMaskingSpanQueryBuilder.java | 8 +-- .../index/query/FuzzyQueryBuilder.java | 18 +++--- .../query/GeoBoundingBoxQueryBuilder.java | 32 +++++----- .../index/query/GeoDistanceQueryBuilder.java | 20 +++---- .../index/query/GeoPolygonQueryBuilder.java | 10 ++-- .../index/query/GeoShapeQueryBuilder.java | 22 +++---- .../index/query/HasChildQueryBuilder.java | 18 +++--- .../index/query/HasParentQueryBuilder.java | 16 ++--- .../index/query/MatchNoneQueryBuilder.java | 4 +- .../query/MatchPhrasePrefixQueryBuilder.java | 12 ++-- .../index/query/MatchPhraseQueryBuilder.java | 10 ++-- .../index/query/MatchQueryBuilder.java | 32 +++++----- .../index/query/MoreLikeThisQueryBuilder.java | 44 +++++++------- .../index/query/MultiMatchQueryBuilder.java | 36 +++++------ .../index/query/NestedQueryBuilder.java | 14 ++--- .../index/query/ParentIdQueryBuilder.java | 10 ++-- .../index/query/PrefixQueryBuilder.java | 8 +-- .../index/query/QueryStringQueryBuilder.java | 60 +++++++++---------- .../index/query/RangeQueryBuilder.java | 30 +++++----- .../index/query/RegexpQueryBuilder.java | 16 ++--- .../index/query/ScriptQueryBuilder.java | 8 +-- .../index/query/SimpleQueryStringBuilder.java | 28 ++++----- .../query/SpanContainingQueryBuilder.java | 8 +-- .../index/query/SpanFirstQueryBuilder.java | 8 +-- .../query/SpanMultiTermQueryBuilder.java | 6 +- .../index/query/SpanNearQueryBuilder.java | 10 ++-- .../index/query/SpanNotQueryBuilder.java | 14 ++--- .../index/query/SpanOrQueryBuilder.java | 6 +- .../index/query/SpanTermQueryBuilder.java | 8 +-- .../index/query/SpanWithinQueryBuilder.java | 8 +-- .../index/query/TermQueryBuilder.java | 8 +-- .../index/query/TermsQueryBuilder.java | 4 +- .../index/query/TypeQueryBuilder.java | 6 +- .../index/query/WildcardQueryBuilder.java | 10 ++-- .../index/query/WrapperQueryBuilder.java | 2 +- .../FunctionScoreQueryBuilder.java | 22 +++---- .../ScriptScoreFunctionBuilder.java | 2 +- .../search/rescore/RescoreBuilder.java | 2 +- .../search/suggest/phrase/Laplace.java | 2 +- .../search/suggest/phrase/StupidBackoff.java | 2 +- .../percolator/PercolateQueryBuilder.java | 22 +++---- 47 files changed, 337 insertions(+), 337 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index 7866035dfac..e5416c131a6 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -358,17 +358,17 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { } } } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, DISABLE_COORD_FIELD)) { + if (DISABLE_COORD_FIELD.match(currentFieldName)) { disableCoord = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, 
MINIMUM_SHOULD_MATCH)) { + } else if (MINIMUM_SHOULD_MATCH.match(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_NUMBER_SHOULD_MATCH)) { + } else if (MINIMUM_NUMBER_SHOULD_MATCH.match(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ADJUST_PURE_NEGATIVE)) { + } else if (ADJUST_PURE_NEGATIVE.match(currentFieldName)) { adjustPureNegative = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[bool] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index 19f7a1b6ea1..cb16faf9b39 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -153,21 +153,21 @@ public class BoostingQueryBuilder extends AbstractQueryBuilder if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERIES_FIELD)) { + if (QUERIES_FIELD.match(currentFieldName)) { queriesFound = true; queries.add(parseContext.parseInnerQueryBuilder()); } else { throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERIES_FIELD)) { + if (QUERIES_FIELD.match(currentFieldName)) { queriesFound = true; while (token != XContentParser.Token.END_ARRAY) { queries.add(parseContext.parseInnerQueryBuilder()); @@ -154,11 +154,11 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]"); } } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TIE_BREAKER_FIELD)) { + } else if (TIE_BREAKER_FIELD.match(currentFieldName)) { tieBreaker = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index a6f85f96be4..07d652cb333 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -96,11 +96,11 @@ public class ExistsQueryBuilder extends AbstractQueryBuilder if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, FIELD_FIELD)) { + if (FIELD_FIELD.match(currentFieldName)) { fieldPattern = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), "[" + ExistsQueryBuilder.NAME + diff --git a/core/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java index 2bd384aff95..a7ef6d9551b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -115,7 +115,7 @@ public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder i if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) { + if (TERM_FIELD.match(currentFieldName)) { value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { + } else if (VALUE_FIELD.match(currentFieldName)) { value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) { + } else if (Fuzziness.FIELD.match(currentFieldName)) { fuzziness = Fuzziness.parse(parser); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) { + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName)) { prefixLength = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName)) { maxExpansions = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) { + } else if (TRANSPOSITIONS_FIELD.match(currentFieldName)) { transpositions = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { + } else if (REWRITE_FIELD.match(currentFieldName)) { rewrite = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 07b39ba12c1..00371ce7a63 100644 --- 
a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -408,30 +408,30 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder(); while ((token = parser.nextToken()) != Token.END_ARRAY) { shell.add(GeoUtils.parseGeoPoint(parser)); @@ -271,16 +271,16 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) { + if (QUERY_FIELD.match(currentFieldName)) { value = parser.objectText(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { + } else if (TYPE_FIELD.match(currentFieldName)) { String tStr = parser.text(); if ("boolean".equals(tStr)) { type = MatchQuery.Type.BOOLEAN; @@ -569,31 +569,31 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { } else { throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support type " + tStr); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) { + } else if (ANALYZER_FIELD.match(currentFieldName)) { analyzer = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) { + } else if (SLOP_FIELD.match(currentFieldName)) { slop = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) { + } else if (Fuzziness.FIELD.match(currentFieldName)) { fuzziness = Fuzziness.parse(parser); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) { + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName)) { prefixLength = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) { + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName)) { maxExpansion = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) { + } else if (OPERATOR_FIELD.match(currentFieldName)) { operator = Operator.fromString(parser.text()); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) { + } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) { + } else if (FUZZY_REWRITE_FIELD.match(currentFieldName)) { fuzzyRewrite = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) { + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName)) { fuzzyTranspositions = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) { + } else if (LENIENT_FIELD.match(currentFieldName)) { lenient = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) { + } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName)) { cutOffFrequency = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, 
ZERO_TERMS_QUERY_FIELD)) { + } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName)) { String zeroTermsDocs = parser.text(); if ("none".equalsIgnoreCase(zeroTermsDocs)) { zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE; @@ -603,7 +603,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { throw new ParsingException(parser.getTokenLocation(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index e776f4953d2..e4df758f2ba 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -834,33 +834,33 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { fields.add(parser.text()); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Field.LIKE)) { + } else if (Field.LIKE.match(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { parseLikeField(parseContext, likeTexts, likeItems); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Field.UNLIKE)) { + } else if (Field.UNLIKE.match(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { parseLikeField(parseContext, unlikeTexts, unlikeItems); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Field.IDS)) { + } else if (Field.IDS.match(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (!token.isValue()) { throw new IllegalArgumentException("ids array element should only contain ids"); } likeItems.add(new Item(null, null, parser.text())); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Field.DOCS)) { + } else if (Field.DOCS.match(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("docs array element should include an object"); } likeItems.add(Item.parse(parser, parseContext.getParseFieldMatcher(), new Item())); } - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Field.STOP_WORDS)) { + } else if (Field.STOP_WORDS.match(currentFieldName)) { stopWords = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { stopWords.add(parser.text()); @@ -906,9 +906,9 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) { + if (QUERY_FIELD.match(currentFieldName)) { query = parseContext.parseInnerQueryBuilder(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) { + } else if (INNER_HITS_FIELD.match(currentFieldName)) { innerHitBuilder = InnerHitBuilder.fromXContent(parseContext); } else { throw new 
ParsingException(parser.getTokenLocation(), "[nested] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, PATH_FIELD)) { + if (PATH_FIELD.match(currentFieldName)) { path = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, IGNORE_UNMAPPED_FIELD)) { + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName)) { ignoreUnmapped = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, SCORE_MODE_FIELD)) { + } else if (SCORE_MODE_FIELD.match(currentFieldName)) { scoreMode = HasChildQueryBuilder.parseScoreMode(parser.text()); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[nested] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java index f85e232032a..f5608df0703 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java @@ -128,15 +128,15 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_FIELD)) { + } else if (PREFIX_FIELD.match(currentFieldName)) { value = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { + } else if (REWRITE_FIELD.match(currentFieldName)) { rewrite = parser.textOrNull(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index e9e194c6a28..f60aa56d0b9 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -720,7 +720,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder i if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, FROM_FIELD)) { + if (FROM_FIELD.match(currentFieldName)) { from = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TO_FIELD)) { + } else if (TO_FIELD.match(currentFieldName)) { to = parser.objectBytes(); - } else if 
(parseContext.getParseFieldMatcher().match(currentFieldName, INCLUDE_LOWER_FIELD)) { + } else if (INCLUDE_LOWER_FIELD.match(currentFieldName)) { includeLower = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, INCLUDE_UPPER_FIELD)) { + } else if (INCLUDE_UPPER_FIELD.match(currentFieldName)) { includeUpper = parser.booleanValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, GT_FIELD)) { + } else if (GT_FIELD.match(currentFieldName)) { from = parser.objectBytes(); includeLower = false; - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, GTE_FIELD)) { + } else if (GTE_FIELD.match(currentFieldName)) { from = parser.objectBytes(); includeLower = true; - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LT_FIELD)) { + } else if (LT_FIELD.match(currentFieldName)) { to = parser.objectBytes(); includeUpper = false; - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, LTE_FIELD)) { + } else if (LTE_FIELD.match(currentFieldName)) { to = parser.objectBytes(); includeUpper = true; - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TIME_ZONE_FIELD)) { + } else if (TIME_ZONE_FIELD.match(currentFieldName)) { timeZone = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FORMAT_FIELD)) { + } else if (FORMAT_FIELD.match(currentFieldName)) { format = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, RELATION_FIELD)) { + } else if (RELATION_FIELD.match(currentFieldName)) { relation = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -408,9 +408,9 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i } } } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, NAME_FIELD)) { + if (NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FIELDDATA_FIELD)) { + } else if (FIELDDATA_FIELD.match(currentFieldName)) { // ignore } else { throw new ParsingException(parser.getTokenLocation(), "[range] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index e5632e96c81..cfd7b428490 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -200,20 +200,20 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { + if (VALUE_FIELD.match(currentFieldName)) { value = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = 
parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) { + } else if (REWRITE_FIELD.match(currentFieldName)) { rewrite = parser.textOrNull(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FLAGS_FIELD)) { + } else if (FLAGS_FIELD.match(currentFieldName)) { String flags = parser.textOrNull(); flagsValue = RegexpFlag.resolveValue(flags); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_DETERMINIZED_STATES_FIELD)) { + } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName)) { maxDeterminizedStates = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, FLAGS_VALUE_FIELD)) { + } else if (FLAGS_VALUE_FIELD.match(currentFieldName)) { flagsValue = parser.intValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), @@ -222,7 +222,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder } } } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, NAME_FIELD)) { + if (NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName()); diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 377aeb103d6..077a687ed83 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -99,17 +99,17 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder } else if (parseContext.isDeprecatedSetting(currentFieldName)) { // skip } else if (token == XContentParser.Token.START_OBJECT) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { + } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 8025b25c30c..4e3b9d98b1e 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -492,7 +492,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, CLAUSES_FIELD)) { + if (CLAUSES_FIELD.match(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { QueryBuilder query = parseContext.parseInnerQueryBuilder(); if (query instanceof SpanQueryBuilder == false) { @@ -123,9 +123,9 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]"); } } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java index fae55b35e66..67319dfe6e4 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/SpanTermQueryBuilder.java @@ -109,13 +109,13 @@ public class SpanTermQueryBuilder extends BaseTermQueryBuilder { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) { + if (TERM_FIELD.match(currentFieldName)) { value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { + } else if (VALUE_FIELD.match(currentFieldName)) { value = parser.objectBytes(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java index c301aedcd89..7ef57a8a79e 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java @@ -353,9 +353,9 @@ public class TermsQueryBuilder extends AbstractQueryBuilder { fieldName = currentFieldName; termsLookup = TermsLookup.parseTermsLookup(parser); } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, 
AbstractQueryBuilder.NAME_FIELD)) { + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java index e7fb520faf1..88e655555f8 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java @@ -93,11 +93,11 @@ public class TypeQueryBuilder extends AbstractQueryBuilder { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { queryName = parser.text(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) { + } else if (VALUE_FIELD.match(currentFieldName)) { type = parser.utf8Bytes(); } else { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index bfe173664f4..50d9c6e6a4c 100644 --- a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -153,15 +153,15 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder> extends ToXC if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token.isValue()) { - if (parseContext.getParseFieldMatcher().match(fieldName, WINDOW_SIZE_FIELD)) { + if (WINDOW_SIZE_FIELD.match(fieldName)) { windowSize = parser.intValue(); } else { throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java index 6c781a8ee91..00af02a8056 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java @@ -112,7 +112,7 @@ public final class Laplace extends SmoothingModel { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } - if (token.isValue() && parseContext.getParseFieldMatcher().match(fieldName, ALPHA_FIELD)) { + if (token.isValue() && ALPHA_FIELD.match(fieldName)) { alpha = parser.doubleValue(); } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java index 3e67110ce7d..607ccac9747 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java @@ -115,7 +115,7 @@ public final class StupidBackoff extends SmoothingModel { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } - if (token.isValue() && parseContext.getParseFieldMatcher().match(fieldName, 
DISCOUNT_FIELD)) { + if (token.isValue() && DISCOUNT_FIELD.match(fieldName)) { discount = parser.doubleValue(); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index ff3a9bf1ee6..632c5022838 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -246,7 +246,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder Date: Fri, 30 Dec 2016 15:28:18 +0100 Subject: [PATCH 045/119] Remove some more usages of ParseFieldMatcher in favour of using ParseField directly Relates to #19552 Relates to #22130 --- .../common/xcontent/ParseFieldRegistry.java | 2 +- .../index/query/MoreLikeThisQueryBuilder.java | 12 +++--- .../index/query/QueryParseContext.java | 2 +- .../BlobStoreIndexShardSnapshot.java | 14 +++---- .../BlobStoreIndexShardSnapshots.java | 6 +-- .../admin/indices/RestAnalyzeAction.java | 18 ++++----- .../search/aggregations/Aggregator.java | 2 +- .../bucket/range/RangeAggregator.java | 10 ++--- .../GeoDistanceAggregationBuilder.java | 10 ++--- .../bucket/sampler/SamplerAggregator.java | 2 +- .../SignificantTermsAggregatorFactory.java | 2 +- .../bucket/terms/TermsAggregatorFactory.java | 2 +- .../bucket/terms/support/IncludeExclude.java | 6 +-- .../movavg/models/HoltWintersModel.java | 2 +- .../fetch/subphase/FetchSourceContext.java | 8 ++-- .../search/sort/GeoDistanceSortBuilder.java | 18 ++++----- .../search/suggest/SuggestBuilder.java | 2 +- .../phrase/PhraseSuggestionBuilder.java | 40 +++++++++---------- .../search/suggest/phrase/SmoothingModel.java | 6 +-- .../suggest/term/TermSuggestionBuilder.java | 28 ++++++------- .../suggest/CustomSuggesterSearchIT.java | 10 ++--- .../matrix/stats/MatrixStatsParser.java | 2 +- .../support/MultiValuesSourceParser.java | 10 ++--- .../reindex/RestUpdateByQueryAction.java | 4 +- 24 files changed, 109 insertions(+), 109 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java b/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java index 81f5b995c18..f0f2d759902 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java @@ -103,7 +103,7 @@ public class ParseFieldRegistry { } ParseField parseField = parseFieldAndValue.v1(); T value = parseFieldAndValue.v2(); - boolean match = parseFieldMatcher.match(name, parseField); + boolean match = parseField.match(name); //this is always expected to match, ParseField is useful for deprecation warnings etc. 
here assert match : "ParseField did not match registered name [" + name + "][" + registryName + "]"; return value; diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index e4df758f2ba..1166a0d678d 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -359,15 +359,15 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder fields = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { @@ -378,7 +378,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder, To String currentFieldName = parser.currentName(); token = parser.nextToken(); if (token == XContentParser.Token.START_ARRAY) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES) == false) { + if (ParseFields.FILES.match(currentFieldName) == false) { throw new ElasticsearchParseException("unknown array [{}]", currentFieldName); } while (parser.nextToken() != XContentParser.Token.END_ARRAY) { @@ -253,7 +253,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To files.put(fileInfo.name(), fileInfo); } } else if (token == XContentParser.Token.START_OBJECT) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.SNAPSHOTS) == false) { + if (ParseFields.SNAPSHOTS.match(currentFieldName) == false) { throw new ElasticsearchParseException("unknown object [{}]", currentFieldName); } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -268,7 +268,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); if (parser.nextToken() == XContentParser.Token.START_ARRAY) { - if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES) == false) { + if (ParseFields.FILES.match(currentFieldName) == false) { throw new ElasticsearchParseException("unknown array [{}]", currentFieldName); } List fileNames = new ArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java index c32010642e0..bbe022a318c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java @@ -83,9 +83,9 @@ public class RestAnalyzeAction extends BaseRestHandler { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - } else if (parseFieldMatcher.match(currentFieldName, Fields.TEXT) && token == XContentParser.Token.VALUE_STRING) { + } else if (Fields.TEXT.match(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { analyzeRequest.text(parser.text()); - } else if (parseFieldMatcher.match(currentFieldName, Fields.TEXT) && token == XContentParser.Token.START_ARRAY) { + } else if (Fields.TEXT.match(currentFieldName) && token == XContentParser.Token.START_ARRAY) { List texts = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { @@ -94,11 +94,11 @@ public class RestAnalyzeAction extends BaseRestHandler { texts.add(parser.text()); } analyzeRequest.text(texts.toArray(new 
String[texts.size()])); - } else if (parseFieldMatcher.match(currentFieldName, Fields.ANALYZER) && token == XContentParser.Token.VALUE_STRING) { + } else if (Fields.ANALYZER.match(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { analyzeRequest.analyzer(parser.text()); - } else if (parseFieldMatcher.match(currentFieldName, Fields.FIELD) && token == XContentParser.Token.VALUE_STRING) { + } else if (Fields.FIELD.match(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { analyzeRequest.field(parser.text()); - } else if (parseFieldMatcher.match(currentFieldName, Fields.TOKENIZER)) { + } else if (Fields.TOKENIZER.match(currentFieldName)) { if (token == XContentParser.Token.VALUE_STRING) { analyzeRequest.tokenizer(parser.text()); } else if (token == XContentParser.Token.START_OBJECT) { @@ -106,7 +106,7 @@ public class RestAnalyzeAction extends BaseRestHandler { } else { throw new IllegalArgumentException(currentFieldName + " should be tokenizer's name or setting"); } - } else if (parseFieldMatcher.match(currentFieldName, Fields.TOKEN_FILTERS) + } else if (Fields.TOKEN_FILTERS.match(currentFieldName) && token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -118,7 +118,7 @@ public class RestAnalyzeAction extends BaseRestHandler { + " array element should contain filter's name or setting"); } } - } else if (parseFieldMatcher.match(currentFieldName, Fields.CHAR_FILTERS) + } else if (Fields.CHAR_FILTERS.match(currentFieldName) && token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -130,13 +130,13 @@ public class RestAnalyzeAction extends BaseRestHandler { + " array element should contain char filter's name or setting"); } } - } else if (parseFieldMatcher.match(currentFieldName, Fields.EXPLAIN)) { + } else if (Fields.EXPLAIN.match(currentFieldName)) { if (parser.isBooleanValue()) { analyzeRequest.explain(parser.booleanValue()); } else { throw new IllegalArgumentException(currentFieldName + " must be either 'true' or 'false'"); } - } else if (parseFieldMatcher.match(currentFieldName, Fields.ATTRIBUTES) && token == XContentParser.Token.START_ARRAY) { + } else if (Fields.ATTRIBUTES.match(currentFieldName) && token == XContentParser.Token.START_ARRAY) { List attributes = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index 37cfe18e83b..7c88f6373d8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -132,7 +132,7 @@ public abstract class Aggregator extends BucketCollector implements Releasable { public static SubAggCollectionMode parse(String value, ParseFieldMatcher parseFieldMatcher) { SubAggCollectionMode[] modes = SubAggCollectionMode.values(); for (SubAggCollectionMode mode : modes) { - if (parseFieldMatcher.match(value, mode.parseField)) { + if (mode.parseField.match(value)) { return mode; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java 
index cdb1dda221d..f526745322f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -134,17 +134,17 @@ public class RangeAggregator extends BucketsAggregator { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (parseFieldMatcher.match(currentFieldName, FROM_FIELD)) { + if (FROM_FIELD.match(currentFieldName)) { from = parser.doubleValue(); - } else if (parseFieldMatcher.match(currentFieldName, TO_FIELD)) { + } else if (TO_FIELD.match(currentFieldName)) { to = parser.doubleValue(); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (parseFieldMatcher.match(currentFieldName, FROM_FIELD)) { + if (FROM_FIELD.match(currentFieldName)) { fromAsStr = parser.text(); - } else if (parseFieldMatcher.match(currentFieldName, TO_FIELD)) { + } else if (TO_FIELD.match(currentFieldName)) { toAsStr = parser.text(); - } else if (parseFieldMatcher.match(currentFieldName, KEY_FIELD)) { + } else if (KEY_FIELD.match(currentFieldName)) { key = parser.text(); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java index d70c150363b..85404dc178c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java @@ -180,17 +180,17 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde if (token == XContentParser.Token.FIELD_NAME) { toOrFromOrKey = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NUMBER) { - if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) { + if (Range.FROM_FIELD.match(toOrFromOrKey)) { from = parser.doubleValue(); - } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) { + } else if (Range.TO_FIELD.match(toOrFromOrKey)) { to = parser.doubleValue(); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (parseFieldMatcher.match(toOrFromOrKey, Range.KEY_FIELD)) { + if (Range.KEY_FIELD.match(toOrFromOrKey)) { key = parser.text(); - } else if (parseFieldMatcher.match(toOrFromOrKey, Range.FROM_FIELD)) { + } else if (Range.FROM_FIELD.match(toOrFromOrKey)) { fromAsStr = parser.text(); - } else if (parseFieldMatcher.match(toOrFromOrKey, Range.TO_FIELD)) { + } else if (Range.TO_FIELD.match(toOrFromOrKey)) { toAsStr = parser.text(); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 7d15839062c..1e8238d8de3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -113,7 +113,7 @@ public class SamplerAggregator extends SingleBucketAggregator { public static ExecutionMode fromString(String value, ParseFieldMatcher parseFieldMatcher) { for (ExecutionMode mode : values()) { - if (parseFieldMatcher.match(value, mode.parseField)) { + if (mode.parseField.match(value)) { return mode; } } 
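The hunks above and below all apply the same mechanical rewrite: instead of routing a field name through `parseContext.getParseFieldMatcher().match(currentFieldName, SOME_FIELD)`, the `ParseField` constant is asked directly via `SOME_FIELD.match(currentFieldName)`, so call sites no longer need a `ParseFieldMatcher` at all. A minimal sketch of the pattern follows; the class and field names are hypothetical and do not appear in the patch, but the single-argument `ParseField#match` call is exactly the form these hunks switch to.

[source,java]
--------------------------------------------------
import org.elasticsearch.common.ParseField;

// Hypothetical parser fragment; ExampleFieldParser, BOOST_FIELD and isBoost
// are invented for illustration only.
public class ExampleFieldParser {
    private static final ParseField BOOST_FIELD = new ParseField("boost");

    // Before: parseContext.getParseFieldMatcher().match(currentFieldName, BOOST_FIELD)
    // After:  the ParseField constant matches the current field name itself.
    boolean isBoost(String currentFieldName) {
        return BOOST_FIELD.match(currentFieldName);
    }
}
--------------------------------------------------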
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index 80e15394ede..009c49af54f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -291,7 +291,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac public static ExecutionMode fromString(String value, ParseFieldMatcher parseFieldMatcher) { for (ExecutionMode mode : values()) { - if (parseFieldMatcher.match(value, mode.parseField)) { + if (mode.parseField.match(value)) { return mode; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 37e6813ebcc..5a512eaeb4d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -308,7 +308,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory includesList = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -159,7 +159,7 @@ public class FetchSourceContext implements Writeable, ToXContent { } } includes = includesList.toArray(new String[includesList.size()]); - } else if (parseFieldMatcher.match(currentFieldName, EXCLUDES_FIELD)) { + } else if (EXCLUDES_FIELD.match(currentFieldName)) { List excludesList = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { @@ -175,9 +175,9 @@ public class FetchSourceContext implements Writeable, ToXContent { + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_STRING) { - if (parseFieldMatcher.match(currentFieldName, INCLUDES_FIELD)) { + if (INCLUDES_FIELD.match(currentFieldName)) { includes = new String[] {parser.text()}; - } else if (parseFieldMatcher.match(currentFieldName, EXCLUDES_FIELD)) { + } else if (EXCLUDES_FIELD.match(currentFieldName)) { excludes = new String[] {parser.text()}; } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 37e660d2a31..7111cee5766 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -422,7 +422,7 @@ public class GeoDistanceSortBuilder extends SortBuilder fieldName = currentName; } else if (token == XContentParser.Token.START_OBJECT) { - if (parseFieldMatcher.match(currentName, NESTED_FILTER_FIELD)) { + if (NESTED_FILTER_FIELD.match(currentName)) { nestedFilter = context.parseInnerQueryBuilder(); } else { // the json in the format of -> field : { lat : 30, lon : 12 } @@ -439,27 +439,27 @@ public class GeoDistanceSortBuilder extends SortBuilder geoPoints.add(point); } } else if (token.isValue()) { - 
if (parseFieldMatcher.match(currentName, ORDER_FIELD)) { + if (ORDER_FIELD.match(currentName)) { order = SortOrder.fromString(parser.text()); - } else if (parseFieldMatcher.match(currentName, UNIT_FIELD)) { + } else if (UNIT_FIELD.match(currentName)) { unit = DistanceUnit.fromString(parser.text()); - } else if (parseFieldMatcher.match(currentName, DISTANCE_TYPE_FIELD)) { + } else if (DISTANCE_TYPE_FIELD.match(currentName)) { geoDistance = GeoDistance.fromString(parser.text()); - } else if (parseFieldMatcher.match(currentName, COERCE_FIELD)) { + } else if (COERCE_FIELD.match(currentName)) { coerce = parser.booleanValue(); if (coerce) { ignoreMalformed = true; } - } else if (parseFieldMatcher.match(currentName, IGNORE_MALFORMED_FIELD)) { + } else if (IGNORE_MALFORMED_FIELD.match(currentName)) { boolean ignore_malformed_value = parser.booleanValue(); if (coerce == false) { ignoreMalformed = ignore_malformed_value; } - } else if (parseFieldMatcher.match(currentName, VALIDATION_METHOD_FIELD)) { + } else if (VALIDATION_METHOD_FIELD.match(currentName)) { validation = GeoValidationMethod.fromString(parser.text()); - } else if (parseFieldMatcher.match(currentName, SORTMODE_FIELD)) { + } else if (SORTMODE_FIELD.match(currentName)) { sortMode = SortMode.fromString(parser.text()); - } else if (parseFieldMatcher.match(currentName, NESTED_PATH_FIELD)) { + } else if (NESTED_PATH_FIELD.match(currentName)) { nestedPath = parser.text(); } else if (token == Token.VALUE_STRING){ if (fieldName != null && fieldName.equals(currentName) == false) { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java index 332d18f256d..84c3da8618e 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java @@ -154,7 +154,7 @@ public class SuggestBuilder extends ToXContentToBytes implements Writeable { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token.isValue()) { - if (parseFieldMatcher.match(fieldName, GLOBAL_TEXT_FIELD)) { + if (GLOBAL_TEXT_FIELD.match(fieldName)) { suggestBuilder.setGlobalText(parser.text()); } else { throw new IllegalArgumentException("[suggest] does not support [" + fieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 969b1c24d5c..20d4f6853c0 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -499,34 +499,34 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder otherOptions) throws IOException { - if (parseFieldMatcher.match(currentFieldName, MULTIVALUE_MODE_FIELD)) { + if (MULTIVALUE_MODE_FIELD.match(currentFieldName)) { if (token == XContentParser.Token.VALUE_STRING) { otherOptions.put(MULTIVALUE_MODE_FIELD, parser.text()); return true; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java index 4311f975c1d..8d87d1fcd1c 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java +++ 
b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java @@ -93,11 +93,11 @@ public abstract class MultiValuesSourceParser implement if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (parseFieldMatcher.match(currentFieldName, CommonFields.FIELDS)) { + if (CommonFields.FIELDS.match(currentFieldName)) { fields = Collections.singletonList(parser.text()); - } else if (formattable && parseFieldMatcher.match(currentFieldName, CommonFields.FORMAT)) { + } else if (formattable && CommonFields.FORMAT.match(currentFieldName)) { format = parser.text(); - } else if (parseFieldMatcher.match(currentFieldName, CommonFields.VALUE_TYPE)) { + } else if (CommonFields.VALUE_TYPE.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + "Multi-field aggregations do not support scripts."); @@ -106,7 +106,7 @@ public abstract class MultiValuesSourceParser implement "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (parseFieldMatcher.match(currentFieldName, CommonFields.MISSING)) { + if (CommonFields.MISSING.match(currentFieldName)) { missingMap = new HashMap<>(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { parseMissingAndAdd(aggregationName, currentFieldName, parser, missingMap); @@ -125,7 +125,7 @@ public abstract class MultiValuesSourceParser implement throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. 
" + "Multi-field aggregations do not support scripts."; - } else if (parseFieldMatcher.match(currentFieldName, CommonFields.FIELDS)) { + } else if (CommonFields.FIELDS.match(currentFieldName)) { fields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java index 8b197ebc21a..63640c84618 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java @@ -87,13 +87,13 @@ public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler entry = itr.next(); String parameterName = entry.getKey(); Object parameterValue = entry.getValue(); - if (parseFieldMatcher.match(parameterName, Script.LANG_PARSE_FIELD)) { + if (Script.LANG_PARSE_FIELD.match(parameterName)) { if (parameterValue instanceof String || parameterValue == null) { lang = (String) parameterValue; } else { throw new ElasticsearchParseException("Value must be of type String: [" + parameterName + "]"); } - } else if (parseFieldMatcher.match(parameterName, Script.PARAMS_PARSE_FIELD)) { + } else if (Script.PARAMS_PARSE_FIELD.match(parameterName)) { if (parameterValue instanceof Map || parameterValue == null) { params = (Map) parameterValue; } else { From 74acffaae93f2e9108423cb74ee64b931511e173 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 30 Dec 2016 16:02:17 +0100 Subject: [PATCH 046/119] fix compiler warning on access to static field using `this` --- .../java/org/elasticsearch/index/query/QueryParseContext.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 906fa79d866..df5dab9e52a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -55,7 +55,7 @@ public class QueryParseContext implements ParseFieldMatcherSupplier { } public boolean isDeprecatedSetting(String setting) { - return this.CACHE.match(setting) || this.CACHE_KEY.match(setting); + return CACHE.match(setting) || CACHE_KEY.match(setting); } /** From f985638bba1d49dfe07f4696ba363656978f342b Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 29 Dec 2016 18:41:57 -0500 Subject: [PATCH 047/119] Add a generic way of checking version before serializing custom cluster object In #22313 we added a check that prevents the SnapshotDeletionsInProgress custom cluster state objects from being sent to older elasticsearch nodes. This commit makes this check generic and available to other cluster state custom objects if needed.
--- .../cluster/AbstractNamedDiffable.java | 29 ++++++-- .../elasticsearch/cluster/ClusterState.java | 64 +++++------------ .../elasticsearch/cluster/DiffableUtils.java | 48 ++++++++++--- .../org/elasticsearch/cluster/NamedDiff.java | 7 ++ .../elasticsearch/cluster/NamedDiffable.java | 35 +++++++++ .../cluster/NamedDiffableValueSerializer.java | 58 +++++++++++++++ .../cluster/SnapshotDeletionsInProgress.java | 5 ++ .../cluster/metadata/MetaData.java | 40 +++++------ .../ClusterSerializationTests.java | 71 +++++++++++++++++++ 9 files changed, 278 insertions(+), 79 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java create mode 100644 core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java diff --git a/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java b/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java index 2a3c619ea21..00e55463118 100644 --- a/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java +++ b/core/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,43 +31,52 @@ import java.io.IOException; * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or * nothing is object remained the same. Comparing to AbstractDiffable, this class also works with NamedWriteables */ -public abstract class AbstractNamedDiffable & NamedWriteable> implements Diffable, NamedWriteable { +public abstract class AbstractNamedDiffable> implements Diffable, NamedWriteable { @Override public Diff diff(T previousState) { if (this.get().equals(previousState)) { - return new CompleteNamedDiff<>(previousState.getWriteableName()); + return new CompleteNamedDiff<>(previousState.getWriteableName(), previousState.getMinimalSupportedVersion()); } else { return new CompleteNamedDiff<>(get()); } } - public static & NamedWriteable> NamedDiff readDiffFrom(Class tClass, String name, StreamInput in) + public static > NamedDiff readDiffFrom(Class tClass, String name, StreamInput in) throws IOException { return new CompleteNamedDiff<>(tClass, name, in); } - private static class CompleteNamedDiff & NamedWriteable> implements NamedDiff { + private static class CompleteNamedDiff> implements NamedDiff { @Nullable private final T part; private final String name; + /** + * A non-null value is only required for write operation, if the diff was just read from the stream the version + * is unnecessary. 
+ */ + @Nullable + private final Version minimalSupportedVersion; + /** * Creates simple diff with changes */ public CompleteNamedDiff(T part) { this.part = part; this.name = part.getWriteableName(); + this.minimalSupportedVersion = part.getMinimalSupportedVersion(); } /** * Creates simple diff without changes */ - public CompleteNamedDiff(String name) { + public CompleteNamedDiff(String name, Version minimalSupportedVersion) { this.part = null; this.name = name; + this.minimalSupportedVersion = minimalSupportedVersion; } /** @@ -75,14 +85,17 @@ public abstract class AbstractNamedDiffable & NamedWriteab public CompleteNamedDiff(Class tClass, String name, StreamInput in) throws IOException { if (in.readBoolean()) { this.part = in.readNamedWriteable(tClass, name); + this.minimalSupportedVersion = part.getMinimalSupportedVersion(); } else { this.part = null; + this.minimalSupportedVersion = null; // We just read this diff, so it's not going to be written } this.name = name; } @Override public void writeTo(StreamOutput out) throws IOException { + assert minimalSupportedVersion != null : "shouldn't be called on diff that was de-serialized from the stream"; if (part != null) { out.writeBoolean(true); part.writeTo(out); @@ -104,6 +117,12 @@ public abstract class AbstractNamedDiffable & NamedWriteab public String getWriteableName() { return name; } + + @Override + public Version getMinimalSupportedVersion() { + assert minimalSupportedVersion != null : "shouldn't be called on the diff that was de-serialized from the stream"; + return minimalSupportedVersion; + } } @SuppressWarnings("unchecked") diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index a2b876eca35..35cce3ab213 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -37,16 +38,13 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -92,10 +90,11 @@ public class ClusterState implements ToXContent, Diffable { public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); - public interface Custom extends Diffable, 
ToXContent, NamedWriteable { - + public interface Custom extends NamedDiffable, ToXContent { } + private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); + public static final String UNKNOWN_UUID = "_na_"; public static final long UNKNOWN_VERSION = -1; @@ -679,19 +678,18 @@ public class ClusterState implements ToXContent, Diffable { routingTable.writeTo(out); nodes.writeTo(out); blocks.writeTo(out); - boolean omitSnapshotDeletions = false; - if (out.getVersion().before(SnapshotDeletionsInProgress.VERSION_INTRODUCED) - && customs.containsKey(SnapshotDeletionsInProgress.TYPE)) { - // before the stated version, there were no SnapshotDeletionsInProgress, so - // don't transfer over the wire protocol - omitSnapshotDeletions = true; - } - out.writeVInt(omitSnapshotDeletions ? customs.size() - 1 : customs.size()); - for (ObjectObjectCursor cursor : customs) { - if (omitSnapshotDeletions && cursor.key.equals(SnapshotDeletionsInProgress.TYPE)) { - continue; + // filter out custom states not supported by the other node + int numberOfCustoms = 0; + for (ObjectCursor cursor : customs.values()) { + if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + numberOfCustoms++; + } + } + out.writeVInt(numberOfCustoms); + for (ObjectCursor cursor : customs.values()) { + if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + out.writeNamedWriteable(cursor.value); } - out.writeNamedWriteable(cursor.value); } } @@ -724,7 +722,7 @@ public class ClusterState implements ToXContent, Diffable { nodes = after.nodes.diff(before.nodes); metaData = after.metaData.diff(before.metaData); blocks = after.blocks.diff(before.blocks); - customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); + customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); } public ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException { @@ -736,19 +734,7 @@ public class ClusterState implements ToXContent, Diffable { nodes = DiscoveryNodes.readDiffFrom(in, localNode); metaData = MetaData.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); - customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return in.readNamedWriteable(Custom.class, key); - } - - @SuppressWarnings("unchecked") - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return in.readNamedWriteable(NamedDiff.class, key); - } - }); + customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); } @Override @@ -761,21 +747,7 @@ public class ClusterState implements ToXContent, Diffable { nodes.writeTo(out); metaData.writeTo(out); blocks.writeTo(out); - Diff> customsDiff = customs; - if (out.getVersion().before(SnapshotDeletionsInProgress.VERSION_INTRODUCED)) { - customsDiff = removeSnapshotDeletionsCustomDiff(customsDiff); - } - customsDiff.writeTo(out); - } - - private Diff> removeSnapshotDeletionsCustomDiff(Diff> customs) { - if (customs instanceof DiffableUtils.ImmutableOpenMapDiff) { - @SuppressWarnings("unchecked") - DiffableUtils.ImmutableOpenMapDiff customsDiff = ((DiffableUtils.ImmutableOpenMapDiff) customs) - .withKeyRemoved(SnapshotDeletionsInProgress.TYPE); - return 
customsDiff; - } - return customs; + customs.writeTo(out); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index eee4a14ad19..ca83a8947f4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.IntCursor; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; @@ -75,7 +76,7 @@ public final class DiffableUtils { /** * Calculates diff between two ImmutableOpenMaps of non-diffable objects */ - public static MapDiff> diff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer keySerializer, NonDiffableValueSerializer valueSerializer) { + public static MapDiff> diff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -91,7 +92,7 @@ public final class DiffableUtils { /** * Calculates diff between two ImmutableOpenIntMaps of non-diffable objects */ - public static MapDiff> diff(ImmutableOpenIntMap before, ImmutableOpenIntMap after, KeySerializer keySerializer, NonDiffableValueSerializer valueSerializer) { + public static MapDiff> diff(ImmutableOpenIntMap before, ImmutableOpenIntMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -107,7 +108,7 @@ public final class DiffableUtils { /** * Calculates diff between two Maps of non-diffable objects */ - public static MapDiff> diff(Map before, Map after, KeySerializer keySerializer, NonDiffableValueSerializer valueSerializer) { + public static MapDiff> diff(Map before, Map after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new JdkMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -436,12 +437,29 @@ public final class DiffableUtils { for (K delete : deletes) { keySerializer.writeKey(delete, out); } - out.writeVInt(diffs.size()); - for (Map.Entry> entry : diffs.entrySet()) { - keySerializer.writeKey(entry.getKey(), out); - valueSerializer.writeDiff(entry.getValue(), out); + Version version = out.getVersion(); + // filter out custom states not supported by the other node + int diffCount = 0; + for (Diff diff : diffs.values()) { + if(valueSerializer.supportsVersion(diff, version)) { + diffCount++; + } } - out.writeVInt(upserts.size()); + out.writeVInt(diffCount); + for (Map.Entry> entry : diffs.entrySet()) { + if(valueSerializer.supportsVersion(entry.getValue(), version)) { + keySerializer.writeKey(entry.getKey(), out); + valueSerializer.writeDiff(entry.getValue(), out); + } + } + // filter out custom states not supported by the other node + int upsertsCount = 0; + for (T upsert : upserts.values()) { + if(valueSerializer.supportsVersion(upsert, version)) { + upsertsCount++; + } + } + out.writeVInt(upsertsCount); for (Map.Entry entry : upserts.entrySet()) { 
keySerializer.writeKey(entry.getKey(), out); valueSerializer.write(entry.getValue(), out); @@ -541,6 +559,20 @@ public final class DiffableUtils { */ boolean supportsDiffableValues(); + /** + * Whether this serializer supports the version of the output stream + */ + default boolean supportsVersion(Diff value, Version version) { + return true; + } + + /** + * Whether this serializer supports the version of the output stream + */ + default boolean supportsVersion(V value, Version version) { + return true; + } + /** * Computes diff if this serializer supports diffable values */ diff --git a/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java index a5dda7ba716..9da3167ae88 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java +++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java @@ -19,11 +19,18 @@ package org.elasticsearch.cluster; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteable; /** * Diff that also support NamedWriteable interface */ public interface NamedDiff> extends Diff, NamedWriteable { + /** + * The minimal version of the recipient this custom object can be sent to + */ + default Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java new file mode 100644 index 00000000000..07974422096 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteable; + +/** + * Diff that also support NamedWriteable interface + */ +public interface NamedDiffable extends Diffable, NamedWriteable { + /** + * The minimal version of the recipient this custom object can be sent to + */ + default Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java new file mode 100644 index 00000000000..c6434db9e87 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Value Serializer for named diffables + */ +public class NamedDiffableValueSerializer> extends DiffableUtils.DiffableValueSerializer { + + private final Class tClass; + + public NamedDiffableValueSerializer(Class tClass) { + this.tClass = tClass; + } + + @Override + public T read(StreamInput in, String key) throws IOException { + return in.readNamedWriteable(tClass, key); + } + + @Override + public boolean supportsVersion(Diff value, Version version) { + return version.onOrAfter(((NamedDiff)value).getMinimalSupportedVersion()); + } + + @Override + public boolean supportsVersion(T value, Version version) { + return version.onOrAfter(value.getMinimalSupportedVersion()); + } + + @SuppressWarnings("unchecked") + @Override + public Diff readDiff(StreamInput in, String key) throws IOException { + return in.readNamedWriteable(NamedDiff.class, key); + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index de3c2ae9ed8..b3ab12fe21a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -128,6 +128,11 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable i return readDiffFrom(Custom.class, TYPE, in); } + @Override + public Version getMinimalSupportedVersion() { + return VERSION_INTRODUCED; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray(TYPE); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index d00be01ed58..f19eb2c5c6b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -24,17 +24,18 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.Version; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.NamedDiffable; +import org.elasticsearch.cluster.NamedDiffableValueSerializer; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.collect.ImmutableOpenMap; 
-import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; @@ -110,7 +111,7 @@ public class MetaData implements Iterable, Diffable, To */ public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); - public interface Custom extends Diffable, ToXContent, NamedWriteable { + public interface Custom extends NamedDiffable, ToXContent { EnumSet context(); } @@ -130,6 +131,8 @@ public class MetaData implements Iterable, Diffable, To public static final String GLOBAL_STATE_FILE_PREFIX = "global-"; + private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); + private final String clusterUUID; private final long version; @@ -604,7 +607,7 @@ public class MetaData implements Iterable, Diffable, To persistentSettings = after.persistentSettings; indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer()); templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer()); - customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); + customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); } public MetaDataDiff(StreamInput in) throws IOException { @@ -616,19 +619,7 @@ public class MetaData implements Iterable, Diffable, To IndexMetaData::readDiffFrom); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData::readFrom, IndexTemplateMetaData::readDiffFrom); - customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return in.readNamedWriteable(Custom.class, key); - } - - @SuppressWarnings("unchecked") - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return in.readNamedWriteable(NamedDiff.class, key); - } - }); + customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); } @Override @@ -692,9 +683,18 @@ public class MetaData implements Iterable, Diffable, To for (ObjectCursor cursor : templates.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeNamedWriteable(cursor.value); + // filter out custom states not supported by the other node + int numberOfCustoms = 0; + for (ObjectCursor cursor : customs.values()) { + if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + numberOfCustoms++; + } + } + out.writeVInt(numberOfCustoms); + for (ObjectCursor cursor : customs.values()) { + if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + out.writeNamedWriteable(cursor.value); + } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index d6252fd4366..4e77741694e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -23,20 +23,31 @@ import 
org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import java.util.Collections; +import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class ClusterSerializationTests extends ESAllocationTestCase { @@ -89,4 +100,64 @@ public class ClusterSerializationTests extends ESAllocationTestCase { assertThat(target.toString(), equalTo(source.toString())); } + public void testSnapshotDeletionsInProgressSerialization() throws Exception { + + boolean includeRestore = randomBoolean(); + + ClusterState.Builder builder = ClusterState.builder(ClusterState.EMPTY_STATE) + .putCustom(SnapshotDeletionsInProgress.TYPE, + SnapshotDeletionsInProgress.newInstance( + new SnapshotDeletionsInProgress.Entry( + new Snapshot("repo1", new SnapshotId("snap1", UUIDs.randomBase64UUID())), + randomNonNegativeLong(), randomNonNegativeLong()) + )); + if (includeRestore) { + builder.putCustom(RestoreInProgress.TYPE, + new RestoreInProgress( + new RestoreInProgress.Entry( + new Snapshot("repo2", new SnapshotId("snap2", UUIDs.randomBase64UUID())), + RestoreInProgress.State.STARTED, + Collections.singletonList("index_name"), + ImmutableOpenMap.of() + ) + )); + } + + ClusterState clusterState = builder.incrementVersion().build(); + + Diff diffs = clusterState.diff(ClusterState.EMPTY_STATE); + + // serialize with current version + BytesStreamOutput outStream = new BytesStreamOutput(); + diffs.writeTo(outStream); + StreamInput inStream = outStream.bytes().streamInput(); + inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + Diff serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); + assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? 
notNullValue() : nullValue()); + assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue()); + + // serialize with old version + outStream = new BytesStreamOutput(); + outStream.setVersion(Version.CURRENT.minimumCompatibilityVersion()); + diffs.writeTo(outStream); + inStream = outStream.bytes().streamInput(); + inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); + assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue()); + assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue()); + + // remove the custom and try serializing again with old version + clusterState = ClusterState.builder(clusterState).removeCustom(SnapshotDeletionsInProgress.TYPE).incrementVersion().build(); + outStream = new BytesStreamOutput(); + diffs.writeTo(outStream); + inStream = outStream.bytes().streamInput(); + inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + stateAfterDiffs = serializedDiffs.apply(stateAfterDiffs); + assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue()); + assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue()); + } + } From cd6b56928624743d0d66c325133c79cf2c968b11 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 30 Dec 2016 20:10:50 +0100 Subject: [PATCH 048/119] Remove some usages of ParseFieldMatcher in favour of using ParseField directly Relates to #19552 Relates to #22130 --- .../elasticsearch/common/xcontent/ObjectParser.java | 2 +- .../index/mapper/CompletionFieldMapper.java | 13 ++++++------- .../index/mapper/ParentFieldMapper.java | 2 +- .../index/query/MultiMatchQueryBuilder.java | 3 +-- .../index/query/support/QueryParsers.java | 12 ++++++------ .../admin/indices/RestClearIndicesCacheAction.java | 10 +++++----- .../bucket/range/ip/IpRangeAggregationBuilder.java | 8 ++++---- .../bucket/significant/heuristics/GND.java | 2 +- .../heuristics/NXYSignificanceHeuristic.java | 4 ++-- .../metrics/tophits/TopHitsAggregationBuilder.java | 3 +-- .../search/suggest/SuggestionBuilder.java | 6 +++--- .../search/suggest/phrase/LinearInterpolation.java | 6 +++--- .../aggregations/support/IncludeExcludeTests.java | 4 ++-- .../index/reindex/RestUpdateByQueryAction.java | 6 +++--- 14 files changed, 39 insertions(+), 42 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index 8bd3b634d69..d0163054e66 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -417,7 +417,7 @@ public final class ObjectParser fieldDataSettings = SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(fieldNode, "fielddata")); if (fieldDataSettings.containsKey("loading")) { diff --git a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index b265124e78a..1b06c953a15 100644 --- 
a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -43,7 +43,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.TreeMap; /** @@ -152,7 +151,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder entry : request.params().entrySet()) { - if (parseFieldMatcher.match(entry.getKey(), Fields.QUERY)) { + if (Fields.QUERY.match(entry.getKey())) { clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache())); } - if (parseFieldMatcher.match(entry.getKey(), Fields.REQUEST_CACHE)) { + if (Fields.REQUEST_CACHE.match(entry.getKey())) { clearIndicesCacheRequest.requestCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.requestCache())); } - if (parseFieldMatcher.match(entry.getKey(), Fields.FIELD_DATA)) { + if (Fields.FIELD_DATA.match(entry.getKey())) { clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.fieldDataCache())); } - if (parseFieldMatcher.match(entry.getKey(), Fields.RECYCLER)) { + if (Fields.RECYCLER.match(entry.getKey())) { clearIndicesCacheRequest.recycler(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.recycler())); } - if (parseFieldMatcher.match(entry.getKey(), Fields.FIELDS)) { + if (Fields.FIELDS.match(entry.getKey())) { clearIndicesCacheRequest.fields(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.fields())); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java index 507f1961d62..5612743db61 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java @@ -94,13 +94,13 @@ public final class IpRangeAggregationBuilder if (parser.currentToken() == Token.FIELD_NAME) { continue; } - if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.KEY_FIELD)) { + if (RangeAggregator.Range.KEY_FIELD.match(parser.currentName())) { key = parser.text(); - } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.FROM_FIELD)) { + } else if (RangeAggregator.Range.FROM_FIELD.match(parser.currentName())) { from = parser.textOrNull(); - } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.TO_FIELD)) { + } else if (RangeAggregator.Range.TO_FIELD.match(parser.currentName())) { to = parser.textOrNull(); - } else if (parseFieldMatcher.match(parser.currentName(), MASK_FIELD)) { + } else if (MASK_FIELD.match(parser.currentName())) { mask = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "Unexpected ip range parameter: [" + parser.currentName() + "]"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java index 6c392f0f283..5968f42211e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java @@ -119,7 +119,7 
@@ public class GND extends NXYSignificanceHeuristic { boolean backgroundIsSuperset = true; XContentParser.Token token = parser.nextToken(); while (!token.equals(XContentParser.Token.END_OBJECT)) { - if (context.getParseFieldMatcher().match(parser.currentName(), BACKGROUND_IS_SUPERSET)) { + if (BACKGROUND_IS_SUPERSET.match(parser.currentName())) { parser.nextToken(); backgroundIsSuperset = parser.booleanValue(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java index 69a759a77fd..5f92b5b40e6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java @@ -160,10 +160,10 @@ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic { boolean backgroundIsSuperset = true; XContentParser.Token token = parser.nextToken(); while (!token.equals(XContentParser.Token.END_OBJECT)) { - if (context.getParseFieldMatcher().match(parser.currentName(), INCLUDE_NEGATIVES_FIELD)) { + if (INCLUDE_NEGATIVES_FIELD.match(parser.currentName())) { parser.nextToken(); includeNegatives = parser.booleanValue(); - } else if (context.getParseFieldMatcher().match(parser.currentName(), BACKGROUND_IS_SUPERSET)) { + } else if (BACKGROUND_IS_SUPERSET.match(parser.currentName())) { parser.nextToken(); backgroundIsSuperset = parser.booleanValue(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 33da060ba7d..7cfc0cf61c1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -643,8 +643,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> implemen if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - if (parsefieldMatcher.match(currentFieldName, TEXT_FIELD)) { + if (TEXT_FIELD.match(currentFieldName)) { suggestText = parser.text(); - } else if (parsefieldMatcher.match(currentFieldName, PREFIX_FIELD)) { + } else if (PREFIX_FIELD.match(currentFieldName)) { prefix = parser.text(); - } else if (parsefieldMatcher.match(currentFieldName, REGEX_FIELD)) { + } else if (PREFIX_FIELD.match(currentFieldName)) { regex = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), "suggestion does not support [" + currentFieldName + "]"); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java index 72802e66eaa..e76868b5b3f 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java @@ -143,17 +143,17 @@ public final class LinearInterpolation extends SmoothingModel { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token.isValue()) { - if (matcher.match(fieldName, 
TRIGRAM_FIELD)) { + if (TRIGRAM_FIELD.match(fieldName)) { trigramLambda = parser.doubleValue(); if (trigramLambda < 0) { throw new IllegalArgumentException("trigram_lambda must be positive"); } - } else if (matcher.match(fieldName, BIGRAM_FIELD)) { + } else if (BIGRAM_FIELD.match(fieldName)) { bigramLambda = parser.doubleValue(); if (bigramLambda < 0) { throw new IllegalArgumentException("bigram_lambda must be positive"); } - } else if (matcher.match(fieldName, UNIGRAM_FIELD)) { + } else if (UNIGRAM_FIELD.match(fieldName)) { unigramLambda = parser.doubleValue(); if (unigramLambda < 0) { throw new IllegalArgumentException("unigram_lambda must be positive"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/support/IncludeExcludeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/support/IncludeExcludeTests.java index e982bb7ccf9..1f4570b8b2d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/support/IncludeExcludeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/support/IncludeExcludeTests.java @@ -282,10 +282,10 @@ public class IncludeExcludeTests extends ESTestCase { IncludeExclude exc = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { assertEquals(XContentParser.Token.FIELD_NAME, token); - if (parseFieldMatcher.match(parser.currentName(), IncludeExclude.INCLUDE_FIELD)) { + if (IncludeExclude.INCLUDE_FIELD.match(parser.currentName())) { token = parser.nextToken(); inc = IncludeExclude.parseInclude(parser, parseContext); - } else if (parseFieldMatcher.match(parser.currentName(), IncludeExclude.EXCLUDE_FIELD)) { + } else if (IncludeExclude.EXCLUDE_FIELD.match(parser.currentName())) { token = parser.nextToken(); exc = IncludeExclude.parseExclude(parser, parseContext); } else { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java index 63640c84618..f21083e4ef3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java @@ -99,21 +99,21 @@ public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler Date: Fri, 30 Dec 2016 20:21:48 +0100 Subject: [PATCH 049/119] Remove unused ParseFieldMatcher#match method --- .../org/elasticsearch/common/ParseFieldMatcher.java | 11 ----------- .../search/suggest/SuggestionBuilder.java | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java b/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java index a7d412398e5..9622a9c5909 100644 --- a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java +++ b/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java @@ -36,15 +36,4 @@ public class ParseFieldMatcher { public ParseFieldMatcher(Settings settings) { //we don't do anything with the settings argument, this whole class will be soon removed } - - /** - * Matches a {@link ParseField} against a field name, - * @param fieldName the field name found in the request while parsing - * @param parseField the parse field that we are looking for - * @throws IllegalArgumentException whenever we are in strict mode and the request contained a deprecated field - * @return true whenever the parse field that we are looking for was found, false otherwise - */ - 
public boolean match(String fieldName, ParseField parseField) {
-        return parseField.match(fieldName);
-    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java
index 5f99cb43646..4f6c4d8c553 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java
@@ -273,7 +273,7 @@ public abstract class SuggestionBuilder> implemen
                 suggestText = parser.text();
             } else if (PREFIX_FIELD.match(currentFieldName)) {
                 prefix = parser.text();
-            } else if (PREFIX_FIELD.match(currentFieldName)) {
+            } else if (REGEX_FIELD.match(currentFieldName)) {
                 regex = parser.text();
             } else {
                 throw new ParsingException(parser.getTokenLocation(), "suggestion does not support [" + currentFieldName + "]");

From 20ab4be59f072e21c3cb5877d4aebfae43e98680 Mon Sep 17 00:00:00 2001
From: Ali Beyad
Date: Mon, 2 Jan 2017 12:28:32 -0600
Subject: [PATCH 050/119] Cluster Explain API uses the allocation process to
 explain shard allocation decisions (#22182)

This PR completes the refactoring of the cluster allocation explain API and
improves it in the following two high-level ways:

1. The explain API now uses the same allocators that the AllocationService
uses to make shard allocation decisions. Prior to this PR, the explain API
ran the deciders against each node for the shard in question, but that check
did not execute on the same code path as the allocators, so many shard
allocation scenarios were not captured.

2. The APIs have changed, at both the Java and JSON levels, to accurately
capture the decisions made by the system. The APIs also now report on shard
moving and rebalancing decisions, whereas the previous API did not report
decisions for moving shards that cannot remain on their current node or for
rebalancing shards to form a more balanced cluster.

Note: this change affects plugin developers who may have a custom
implementation of the ShardsAllocator interface. The method weighShard has
been removed and no longer has any utility. In order to support the new
explain API, however, a custom implementation of ShardsAllocator must now
implement ShardAllocationDecision decideShardAllocation(ShardRouting shard,
RoutingAllocation allocation), which provides a decision and explanation for
allocating a single shard. Implementations that do not support explaining a
single shard allocation via the cluster allocation explain API can simply
throw an UnsupportedOperationException from this method.
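
For plugin authors, a minimal sketch of an allocator that opts out of
single-shard explanations might look like the following. This is purely
illustrative and not part of the change: the package and class name are
hypothetical, and it assumes only the allocate and decideShardAllocation
methods of the ShardsAllocator interface described above.

    package org.example.allocation; // hypothetical plugin package

    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
    import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;

    public class MyShardsAllocator implements ShardsAllocator {

        @Override
        public void allocate(RoutingAllocation allocation) {
            // custom assignment, move, and rebalance logic goes here
        }

        // New hook introduced by this change: produce a decision and
        // explanation for a single shard. Allocators that cannot explain an
        // individual shard may simply throw, as described in the note above.
        @Override
        public ShardAllocationDecision decideShardAllocation(ShardRouting shard,
                                                             RoutingAllocation allocation) {
            throw new UnsupportedOperationException(
                "this allocator does not support the cluster allocation explain API");
        }
    }

An allocator that would rather have the API report that no decision was taken
could instead return ShardAllocationDecision.NOT_TAKEN, the same sentinel the
transport action uses in the diff below for initializing and relocating shards.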
--- .../ClusterAllocationExplainRequest.java | 88 +- ...lusterAllocationExplainRequestBuilder.java | 10 +- .../ClusterAllocationExplainResponse.java | 1 - .../ClusterAllocationExplanation.java | 327 ++-- .../cluster/allocation/NodeExplanation.java | 147 -- ...ansportClusterAllocationExplainAction.java | 303 +--- .../AbstractAllocationDecision.java | 20 +- .../AllocateUnassignedDecision.java | 59 +- .../allocation/AllocationDecision.java | 20 +- .../routing/allocation/MoveDecision.java | 86 +- .../routing/allocation/RoutingAllocation.java | 28 +- .../allocation/ShardAllocationDecision.java | 105 ++ .../allocator/BalancedShardsAllocator.java | 41 +- .../allocation/allocator/ShardsAllocator.java | 23 +- .../decider/AllocationDeciders.java | 29 +- .../routing/allocation/decider/Decision.java | 13 + .../decider/EnableAllocationDecider.java | 4 +- .../gateway/GatewayAllocator.java | 45 +- .../gateway/PrimaryShardAllocator.java | 2 +- .../RestClusterAllocationExplainAction.java | 2 + .../ClusterAllocationExplainActionTests.java | 170 +++ .../ClusterAllocationExplainIT.java | 1322 +++++++++++++++-- .../ClusterAllocationExplainRequestTests.java | 4 +- .../ClusterAllocationExplainTests.java | 98 -- .../ClusterAllocationExplanationTests.java | 225 +-- .../cluster/ClusterModuleTests.java | 8 +- .../AllocateUnassignedDecisionTests.java | 4 +- .../allocation/AllocationDecisionTests.java | 20 +- .../allocation/BalanceConfigurationTests.java | 11 +- .../allocation/BalancedSingleShardTests.java | 30 +- .../routing/allocation/MoveDecisionTests.java | 2 - .../cluster.allocation_explain/10_basic.yaml | 56 +- 32 files changed, 2116 insertions(+), 1187 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java create mode 100644 core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java create mode 100644 core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java delete mode 100644 core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index f31b1d37376..ff09c23207f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.cluster.allocation; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Nullable; @@ -46,30 +47,42 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest nodeExplanations; + private final ShardRouting shardRouting; + private final DiscoveryNode currentNode; + private final DiscoveryNode relocationTargetNode; private final ClusterInfo clusterInfo; + private final ShardAllocationDecision shardAllocationDecision; - public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis, - long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean 
hasPendingAsyncFetch, - Map nodeExplanations, @Nullable ClusterInfo clusterInfo) { - this.shard = shard; - this.primary = primary; - this.hasPendingAsyncFetch = hasPendingAsyncFetch; - this.assignedNodeId = assignedNodeId; - this.unassignedInfo = unassignedInfo; - this.allocationDelayMillis = allocationDelayMillis; - this.remainingDelayMillis = remainingDelayMillis; - this.nodeExplanations = nodeExplanations; + public ClusterAllocationExplanation(ShardRouting shardRouting, @Nullable DiscoveryNode currentNode, + @Nullable DiscoveryNode relocationTargetNode, @Nullable ClusterInfo clusterInfo, + ShardAllocationDecision shardAllocationDecision) { + this.shardRouting = shardRouting; + this.currentNode = currentNode; + this.relocationTargetNode = relocationTargetNode; this.clusterInfo = clusterInfo; + this.shardAllocationDecision = shardAllocationDecision; } public ClusterAllocationExplanation(StreamInput in) throws IOException { - this.shard = ShardId.readShardId(in); - this.primary = in.readBoolean(); - this.hasPendingAsyncFetch = in.readBoolean(); - this.assignedNodeId = in.readOptionalString(); - this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new); - this.allocationDelayMillis = in.readVLong(); - this.remainingDelayMillis = in.readVLong(); - - int mapSize = in.readVInt(); - Map nodeToExplanation = new HashMap<>(mapSize); - for (int i = 0; i < mapSize; i++) { - NodeExplanation nodeExplanation = new NodeExplanation(in); - nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation); - } - this.nodeExplanations = nodeToExplanation; - if (in.readBoolean()) { - this.clusterInfo = new ClusterInfo(in); - } else { - this.clusterInfo = null; - } + this.shardRouting = new ShardRouting(in); + this.currentNode = in.readOptionalWriteable(DiscoveryNode::new); + this.relocationTargetNode = in.readOptionalWriteable(DiscoveryNode::new); + this.clusterInfo = in.readOptionalWriteable(ClusterInfo::new); + this.shardAllocationDecision = new ShardAllocationDecision(in); } @Override public void writeTo(StreamOutput out) throws IOException { - this.getShard().writeTo(out); - out.writeBoolean(this.isPrimary()); - out.writeBoolean(this.isStillFetchingShardData()); - out.writeOptionalString(this.getAssignedNodeId()); - out.writeOptionalWriteable(this.getUnassignedInfo()); - out.writeVLong(allocationDelayMillis); - out.writeVLong(remainingDelayMillis); - - out.writeVInt(this.nodeExplanations.size()); - for (NodeExplanation explanation : this.nodeExplanations.values()) { - explanation.writeTo(out); - } - if (this.clusterInfo != null) { - out.writeBoolean(true); - this.clusterInfo.writeTo(out); - } else { - out.writeBoolean(false); - } + shardRouting.writeTo(out); + out.writeOptionalWriteable(currentNode); + out.writeOptionalWriteable(relocationTargetNode); + out.writeOptionalWriteable(clusterInfo); + shardAllocationDecision.writeTo(out); } - /** Return the shard that the explanation is about */ + /** + * Returns the shard that the explanation is about. + */ public ShardId getShard() { - return this.shard; + return shardRouting.shardId(); } - /** Return true if the explained shard is primary, false otherwise */ + /** + * Returns {@code true} if the explained shard is primary, {@code false} otherwise. 
+ */ public boolean isPrimary() { - return this.primary; + return shardRouting.primary(); } - /** Return turn if shard data is still being fetched for the allocation */ - public boolean isStillFetchingShardData() { - return this.hasPendingAsyncFetch; + /** + * Returns the current {@link ShardRoutingState} of the shard. + */ + public ShardRoutingState getShardState() { + return shardRouting.state(); } - /** Return turn if the shard is assigned to a node */ - public boolean isAssigned() { - return this.assignedNodeId != null; - } - - /** Return the assigned node id or null if not assigned */ + /** + * Returns the currently assigned node, or {@code null} if the shard is unassigned. + */ @Nullable - public String getAssignedNodeId() { - return this.assignedNodeId; + public DiscoveryNode getCurrentNode() { + return currentNode; } - /** Return the unassigned info for the shard or null if the shard is assigned */ + /** + * Returns the relocating target node, or {@code null} if the shard is not in the {@link ShardRoutingState#RELOCATING} state. + */ + @Nullable + public DiscoveryNode getRelocationTargetNode() { + return relocationTargetNode; + } + + /** + * Returns the unassigned info for the shard, or {@code null} if the shard is active. + */ @Nullable public UnassignedInfo getUnassignedInfo() { - return this.unassignedInfo; + return shardRouting.unassignedInfo(); } - /** Return the configured delay before the shard can be allocated in milliseconds */ - public long getAllocationDelayMillis() { - return this.allocationDelayMillis; - } - - /** Return the remaining allocation delay for this shard in milliseconds */ - public long getRemainingDelayMillis() { - return this.remainingDelayMillis; - } - - /** Return a map of node to the explanation for that node */ - public Map getNodeExplanations() { - return this.nodeExplanations; - } - - /** Return the cluster disk info for the cluster or null if none available */ + /** + * Returns the cluster disk info for the cluster, or {@code null} if none available. + */ @Nullable public ClusterInfo getClusterInfo() { return this.clusterInfo; } + /** + * Returns the shard allocation decision for attempting to assign or move the shard.
+ */ + public ShardAllocationDecision getShardAllocationDecision() { + return shardAllocationDecision; + } + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); { - builder.startObject("shard"); { - builder.field("index", shard.getIndexName()); - builder.field("index_uuid", shard.getIndex().getUUID()); - builder.field("id", shard.getId()); - builder.field("primary", primary); + builder.field("index", shardRouting.getIndexName()); + builder.field("shard", shardRouting.getId()); + builder.field("primary", shardRouting.primary()); + builder.field("current_state", shardRouting.state().toString().toLowerCase(Locale.ROOT)); + if (shardRouting.unassignedInfo() != null) { + unassignedInfoToXContent(shardRouting.unassignedInfo(), builder); } - builder.endObject(); // end shard - builder.field("assigned", this.assignedNodeId != null); - // If assigned, show the node id of the node it's assigned to - if (assignedNodeId != null) { - builder.field("assigned_node_id", this.assignedNodeId); - } - builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch); - // If we have unassigned info, show that - if (unassignedInfo != null) { - unassignedInfo.toXContent(builder, params); - builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis)); - builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis)); - } - builder.startObject("nodes"); { - for (NodeExplanation explanation : nodeExplanations.values()) { - explanation.toXContent(builder, params); + if (currentNode != null) { + builder.startObject("current_node"); + { + discoveryNodeToXContent(currentNode, true, builder); + if (shardAllocationDecision.getMoveDecision().isDecisionTaken() + && shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) { + builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking()); + } } + builder.endObject(); } - builder.endObject(); // end nodes if (this.clusterInfo != null) { builder.startObject("cluster_info"); { this.clusterInfo.toXContent(builder, params); } builder.endObject(); // end "cluster_info" } + if (shardAllocationDecision.isDecisionTaken()) { + shardAllocationDecision.toXContent(builder, params); + } else { + String explanation; + if (shardRouting.state() == ShardRoutingState.RELOCATING) { + explanation = "the shard is in the process of relocating from node [" + currentNode.getName() + "] " + + "to node [" + relocationTargetNode.getName() + "], wait until relocation has completed"; + } else { + assert shardRouting.state() == ShardRoutingState.INITIALIZING; + explanation = "the shard is in the process of initializing on node [" + currentNode.getName() + "], " + + "wait until initialization has completed"; + } + builder.field("explanation", explanation); + } } builder.endObject(); // end wrapping object return builder; } - /** An Enum representing the final decision for a shard allocation on a node */ - public enum FinalDecision { - // Yes, the shard can be assigned - YES((byte) 0), - // No, the shard cannot be assigned - NO((byte) 1), - // The shard is already assigned to this node - ALREADY_ASSIGNED((byte) 2); + private XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) + throws IOException { - private final byte id; - - FinalDecision (byte id) { - this.id = id; + builder.startObject("unassigned_info"); + 
builder.field("reason", unassignedInfo.getReason()); + builder.field("at", UnassignedInfo.DATE_TIME_FORMATTER.printer().print(unassignedInfo.getUnassignedTimeInMillis())); + if (unassignedInfo.getNumFailedAllocations() > 0) { + builder.field("failed_allocation_attempts", unassignedInfo.getNumFailedAllocations()); } - - private static FinalDecision fromId(byte id) { - switch (id) { - case 0: return YES; - case 1: return NO; - case 2: return ALREADY_ASSIGNED; - default: - throw new IllegalArgumentException("unknown id for final decision: [" + id + "]"); - } - } - - @Override - public String toString() { - switch (id) { - case 0: return "YES"; - case 1: return "NO"; - case 2: return "ALREADY_ASSIGNED"; - default: - throw new IllegalArgumentException("unknown id for final decision: [" + id + "]"); - } - } - - static FinalDecision readFrom(StreamInput in) throws IOException { - return fromId(in.readByte()); - } - - void writeTo(StreamOutput out) throws IOException { - out.writeByte(id); - } - } - - /** An Enum representing the state of the shard store's copy of the data on a node */ - public enum StoreCopy { - // No data for this shard is on the node - NONE((byte) 0), - // A copy of the data is available on this node - AVAILABLE((byte) 1), - // The copy of the data on the node is corrupt - CORRUPT((byte) 2), - // There was an error reading this node's copy of the data - IO_ERROR((byte) 3), - // The copy of the data on the node is stale - STALE((byte) 4), - // It's unknown what the copy of the data is - UNKNOWN((byte) 5); - - private final byte id; - - StoreCopy (byte id) { - this.id = id; - } - - private static StoreCopy fromId(byte id) { - switch (id) { - case 0: return NONE; - case 1: return AVAILABLE; - case 2: return CORRUPT; - case 3: return IO_ERROR; - case 4: return STALE; - case 5: return UNKNOWN; - default: - throw new IllegalArgumentException("unknown id for store copy: [" + id + "]"); - } - } - - @Override - public String toString() { - switch (id) { - case 0: return "NONE"; - case 1: return "AVAILABLE"; - case 2: return "CORRUPT"; - case 3: return "IO_ERROR"; - case 4: return "STALE"; - case 5: return "UNKNOWN"; - default: - throw new IllegalArgumentException("unknown id for store copy: [" + id + "]"); - } - } - - static StoreCopy readFrom(StreamInput in) throws IOException { - return fromId(in.readByte()); - } - - void writeTo(StreamOutput out) throws IOException { - out.writeByte(id); + String details = unassignedInfo.getDetails(); + if (details != null) { + builder.field("details", details); } + builder.field("last_allocation_status", AllocationDecision.fromAllocationStatus(unassignedInfo.getLastAllocationStatus())); + builder.endObject(); + return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java deleted file mode 100644 index 9fdf97b320c..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Map; -/** The cluster allocation explanation for a single node */ -public class NodeExplanation implements Writeable, ToXContent { - private final DiscoveryNode node; - private final Decision nodeDecision; - private final Float nodeWeight; - private final IndicesShardStoresResponse.StoreStatus storeStatus; - private final ClusterAllocationExplanation.FinalDecision finalDecision; - private final ClusterAllocationExplanation.StoreCopy storeCopy; - private final String finalExplanation; - - public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight, - @Nullable final IndicesShardStoresResponse.StoreStatus storeStatus, - final ClusterAllocationExplanation.FinalDecision finalDecision, - final String finalExplanation, - final ClusterAllocationExplanation.StoreCopy storeCopy) { - this.node = node; - this.nodeDecision = nodeDecision; - this.nodeWeight = nodeWeight; - this.storeStatus = storeStatus; - this.finalDecision = finalDecision; - this.finalExplanation = finalExplanation; - this.storeCopy = storeCopy; - } - - public NodeExplanation(StreamInput in) throws IOException { - this.node = new DiscoveryNode(in); - this.nodeDecision = Decision.readFrom(in); - this.nodeWeight = in.readFloat(); - if (in.readBoolean()) { - this.storeStatus = IndicesShardStoresResponse.StoreStatus.readStoreStatus(in); - } else { - this.storeStatus = null; - } - this.finalDecision = ClusterAllocationExplanation.FinalDecision.readFrom(in); - this.finalExplanation = in.readString(); - this.storeCopy = ClusterAllocationExplanation.StoreCopy.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - node.writeTo(out); - nodeDecision.writeTo(out); - out.writeFloat(nodeWeight); - if (storeStatus == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - storeStatus.writeTo(out); - } - finalDecision.writeTo(out); - out.writeString(finalExplanation); - storeCopy.writeTo(out); - } - - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(node.getId()); { - builder.field("node_name", node.getName()); - builder.startObject("node_attributes"); { - for (Map.Entry attrEntry : node.getAttributes().entrySet()) { - builder.field(attrEntry.getKey(), attrEntry.getValue()); - } - } - builder.endObject(); // end attributes - builder.startObject("store"); { - builder.field("shard_copy", 
storeCopy.toString()); - if (storeStatus != null) { - final Throwable storeErr = storeStatus.getStoreException(); - if (storeErr != null) { - builder.field("store_exception", ExceptionsHelper.detailedMessage(storeErr)); - } - } - } - builder.endObject(); // end store - builder.field("final_decision", finalDecision.toString()); - builder.field("final_explanation", finalExplanation); - builder.field("weight", nodeWeight); - builder.startArray("decisions"); - nodeDecision.toXContent(builder, params); - builder.endArray(); - } - builder.endObject(); // end node - return builder; - } - - public DiscoveryNode getNode() { - return this.node; - } - - public Decision getDecision() { - return this.nodeDecision; - } - - public Float getWeight() { - return this.nodeWeight; - } - - @Nullable - public IndicesShardStoresResponse.StoreStatus getStoreStatus() { - return this.storeStatus; - } - - public ClusterAllocationExplanation.FinalDecision getFinalDecision() { - return this.finalDecision; - } - - public String getFinalExplanation() { - return this.finalExplanation; - } - - public ClusterAllocationExplanation.StoreCopy getStoreCopy() { - return this.storeCopy; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 5aa35a059fb..4d4796aaf3a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -19,13 +19,7 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.apache.lucene.index.CorruptIndexException; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; -import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterInfo; @@ -33,34 +27,25 @@ import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RecoverySource; -import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.MoveDecision; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import 
org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; /** * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the @@ -72,7 +57,6 @@ public class TransportClusterAllocationExplainAction private final ClusterInfoService clusterInfoService; private final AllocationDeciders allocationDeciders; private final ShardsAllocator shardAllocator; - private final TransportIndicesShardStoresAction shardStoresAction; private final GatewayAllocator gatewayAllocator; @Inject @@ -80,14 +64,12 @@ public class TransportClusterAllocationExplainAction ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders, - ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction, - GatewayAllocator gatewayAllocator) { + ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) { super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterAllocationExplainRequest::new); this.clusterInfoService = clusterInfoService; this.allocationDeciders = allocationDeciders; this.shardAllocator = shardAllocator; - this.shardStoresAction = shardStoresAction; this.gatewayAllocator = gatewayAllocator; } @@ -106,172 +88,6 @@ public class TransportClusterAllocationExplainAction return new ClusterAllocationExplainResponse(); } - /** - * Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is not true, - * only non-YES (NO and THROTTLE) decisions are returned. - */ - public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) { - Decision d = allocation.deciders().canAllocate(shard, node, allocation); - if (includeYesDecisions) { - return d; - } else { - Decision.Multi nonYesDecisions = new Decision.Multi(); - List decisions = d.getDecisions(); - for (Decision decision : decisions) { - if (decision.type() != Decision.Type.YES) { - nonYesDecisions.add(decision); - } - } - return nonYesDecisions; - } - } - - /** - * Construct a {@code WeightedDecision} object for the given shard given all the metadata. This also attempts to construct the human - * readable FinalDecision and final explanation as part of the explanation. 
- */ - public static NodeExplanation calculateNodeExplanation(ShardRouting shard, - IndexMetaData indexMetaData, - DiscoveryNode node, - Decision nodeDecision, - Float nodeWeight, - IndicesShardStoresResponse.StoreStatus storeStatus, - String assignedNodeId, - Set activeAllocationIds, - boolean hasPendingAsyncFetch) { - final ClusterAllocationExplanation.FinalDecision finalDecision; - final ClusterAllocationExplanation.StoreCopy storeCopy; - final String finalExplanation; - - if (storeStatus == null) { - // No copies of the data - storeCopy = ClusterAllocationExplanation.StoreCopy.NONE; - } else { - final Exception storeErr = storeStatus.getStoreException(); - if (storeErr != null) { - if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) { - storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT; - } else { - storeCopy = ClusterAllocationExplanation.StoreCopy.IO_ERROR; - } - } else if (activeAllocationIds.isEmpty()) { - // The ids are only empty if dealing with a legacy index - // TODO: fetch the shard state versions and display here? - storeCopy = ClusterAllocationExplanation.StoreCopy.UNKNOWN; - } else if (activeAllocationIds.contains(storeStatus.getAllocationId())) { - storeCopy = ClusterAllocationExplanation.StoreCopy.AVAILABLE; - } else { - // Otherwise, this is a stale copy of the data (allocation ids don't match) - storeCopy = ClusterAllocationExplanation.StoreCopy.STALE; - } - } - - if (node.getId().equals(assignedNodeId)) { - finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED; - finalExplanation = "the shard is already assigned to this node"; - } else if (shard.unassigned() && shard.primary() == false && - shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) { - finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() + - " decision"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.unassigned() && shard.primary() == false && - shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) { - finalExplanation = "the shard's state is still being fetched so it cannot be allocated"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && - (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE || - shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) - && hasPendingAsyncFetch) { - finalExplanation = "the shard's state is still being fetched so it cannot be allocated"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) { - finalExplanation = "the copy of the shard is stale, allocation ids do not match"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) { - finalExplanation = "there is no copy of the shard available"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == 
ClusterAllocationExplanation.StoreCopy.CORRUPT) { - finalExplanation = "the copy of the shard is corrupt"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE && - storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) { - finalExplanation = "the copy of the shard cannot be read"; - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - } else { - if (nodeDecision.type() == Decision.Type.NO) { - finalDecision = ClusterAllocationExplanation.FinalDecision.NO; - finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision"; - } else { - // TODO: handle throttling decision better here - finalDecision = ClusterAllocationExplanation.FinalDecision.YES; - if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) { - finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data"; - } else { - finalExplanation = "the shard can be assigned"; - } - } - } - return new NodeExplanation(node, nodeDecision, nodeWeight, storeStatus, finalDecision, finalExplanation, storeCopy); - } - - - /** - * For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code - * includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions. - */ - public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes, - boolean includeYesDecisions, ShardsAllocator shardAllocator, - List shardStores, - GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) { - // don't short circuit deciders, we want a full explanation - allocation.debugDecision(true); - // get the existing unassigned info if available - UnassignedInfo ui = shard.unassignedInfo(); - - Map nodeToDecision = new HashMap<>(); - for (RoutingNode node : routingNodes) { - DiscoveryNode discoNode = node.node(); - if (discoNode.isDataNode()) { - Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions); - nodeToDecision.put(discoNode, d); - } - } - long remainingDelayMillis = 0; - final MetaData metadata = allocation.metaData(); - final IndexMetaData indexMetaData = metadata.index(shard.index()); - long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis(); - if (ui != null && ui.isDelayed()) { - long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings()); - remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis(); - } - - // Calculate weights for each of the nodes - Map weights = shardAllocator.weighShard(allocation, shard); - - Map nodeToStatus = new HashMap<>(shardStores.size()); - for (IndicesShardStoresResponse.StoreStatus status : shardStores) { - nodeToStatus.put(status.getNode(), status); - } - - Map explanations = new HashMap<>(shardStores.size()); - for (Map.Entry entry : nodeToDecision.entrySet()) { - DiscoveryNode node = entry.getKey(); - Decision decision = entry.getValue(); - Float weight = weights.get(node); - IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node); - NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight, - storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()), - allocation.hasPendingAsyncFetch()); - 
explanations.put(node, nodeExplanation); - } - return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), - shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui, - gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo); - } - @Override protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state, final ActionListener listener) { @@ -280,66 +96,105 @@ public class TransportClusterAllocationExplainAction final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state, clusterInfo, System.nanoTime(), false); + ShardRouting shardRouting = findShardToExplain(request, allocation); + logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting); + + ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, + request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator); + listener.onResponse(new ClusterAllocationExplainResponse(cae)); + } + + // public for testing + public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation, + ClusterInfo clusterInfo, boolean includeYesDecisions, + GatewayAllocator gatewayAllocator, ShardsAllocator shardAllocator) { + allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS); + + ShardAllocationDecision shardDecision; + if (shardRouting.initializing() || shardRouting.relocating()) { + shardDecision = ShardAllocationDecision.NOT_TAKEN; + } else { + AllocateUnassignedDecision allocateDecision = shardRouting.unassigned() ? + gatewayAllocator.decideUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN; + if (allocateDecision.isDecisionTaken() == false) { + shardDecision = shardAllocator.decideShardAllocation(shardRouting, allocation); + } else { + shardDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN); + } + } + + return new ClusterAllocationExplanation(shardRouting, + shardRouting.currentNodeId() != null ? allocation.nodes().get(shardRouting.currentNodeId()) : null, + shardRouting.relocatingNodeId() != null ? 
allocation.nodes().get(shardRouting.relocatingNodeId()) : null, + clusterInfo, shardDecision); + } + + // public for testing + public static ShardRouting findShardToExplain(ClusterAllocationExplainRequest request, RoutingAllocation allocation) { ShardRouting foundShard = null; if (request.useAnyUnassignedShard()) { // If we can use any shard, just pick the first unassigned one (if there are any) - RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator(); + RoutingNodes.UnassignedShards.UnassignedIterator ui = allocation.routingNodes().unassigned().iterator(); if (ui.hasNext()) { foundShard = ui.next(); } + if (foundShard == null) { + throw new IllegalStateException("unable to find any unassigned shards to explain [" + request + "]"); + } } else { String index = request.getIndex(); int shard = request.getShard(); if (request.isPrimary()) { // If we're looking for the primary shard, there's only one copy, so pick it directly foundShard = allocation.routingTable().shardRoutingTable(index, shard).primaryShard(); + if (request.getCurrentNode() != null) { + DiscoveryNode primaryNode = allocation.nodes().resolveNode(request.getCurrentNode()); + // the primary is assigned to a node other than the node specified in the request + if (primaryNode.getId().equals(foundShard.currentNodeId()) == false) { + throw new IllegalStateException("unable to find primary shard assigned to node [" + request.getCurrentNode() + "]"); + } + } } else { // If looking for a replica, go through all the replica shards List replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards(); - if (replicaShardRoutings.size() > 0) { - // Pick the first replica at the very least - foundShard = replicaShardRoutings.get(0); - // In case there are multiple replicas where some are assigned and some aren't, - // try to find one that is unassigned at least + if (request.getCurrentNode() != null) { + // the request is to explain a replica shard already assigned on a particular node, + // so find that shard copy + DiscoveryNode replicaNode = allocation.nodes().resolveNode(request.getCurrentNode()); for (ShardRouting replica : replicaShardRoutings) { - if (replica.unassigned()) { + if (replicaNode.getId().equals(replica.currentNodeId())) { foundShard = replica; break; } } + if (foundShard == null) { + throw new IllegalStateException("unable to find a replica shard assigned to node [" + + request.getCurrentNode() + "]"); + } + } else { + if (replicaShardRoutings.size() > 0) { + // Pick the first replica at the very least + foundShard = replicaShardRoutings.get(0); + for (ShardRouting replica : replicaShardRoutings) { + // In case there are multiple replicas where some are assigned and some aren't, + // try to find one that is unassigned at least + if (replica.unassigned()) { + foundShard = replica; + break; + } else if (replica.started() && (foundShard.initializing() || foundShard.relocating())) { + // prefer started shards to initializing or relocating shards because started shards + // can be explained + foundShard = replica; + } + } + } } } } if (foundShard == null) { - listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request)); - return; + throw new IllegalStateException("unable to find any shards to explain [" + request + "] in the routing table"); } - final ShardRouting shardRouting = foundShard; - logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting); - - 
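The restructured `explainShard` above takes exactly one decision path per shard state: initializing and relocating shards get `ShardAllocationDecision.NOT_TAKEN`, unassigned shards are first offered to the `GatewayAllocator`, and started shards fall through to the `ShardsAllocator`. Note also that `includeYesDecisions` now maps onto the new `DebugMode.EXCLUDE_YES_DECISIONS` introduced further down rather than a plain boolean debug flag. A minimal sketch of driving the two entry points together, assuming the classes added in this patch are on the classpath (the summary strings are illustrative only):

[source,java]
--------------------------------------------------
static String explainSummary(ClusterAllocationExplainRequest request,
                             RoutingAllocation allocation,
                             GatewayAllocator gatewayAllocator,
                             ShardsAllocator shardsAllocator) {
    // Resolve the request to a concrete shard copy, as the master operation does.
    ShardRouting shard =
        TransportClusterAllocationExplainAction.findShardToExplain(request, allocation);
    // Explain it; pass null for ClusterInfo since include_disk_info was not requested.
    ClusterAllocationExplanation explanation =
        TransportClusterAllocationExplainAction.explainShard(shard, allocation,
            null, request.includeYesDecisions(), gatewayAllocator, shardsAllocator);
    return explanation.getShardAllocationDecision().isDecisionTaken()
        ? "decision taken for " + shard.shardId()
        : "shard is initializing or relocating; explain it again once it settles";
}
--------------------------------------------------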
getShardStores(shardRouting, new ActionListener() { - @Override - public void onResponse(IndicesShardStoresResponse shardStoreResponse) { - ImmutableOpenIntMap> shardStatuses = - shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName()); - List shardStoreStatus = shardStatuses.get(shardRouting.id()); - ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes, - request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator, - request.includeDiskInfo() ? clusterInfo : null); - listener.onResponse(new ClusterAllocationExplainResponse(cae)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - } - - private void getShardStores(ShardRouting shard, final ActionListener listener) { - IndicesShardStoresRequest request = new IndicesShardStoresRequest(shard.getIndexName()); - request.shardStatuses("all"); - shardStoresAction.execute(request, listener); + return foundShard; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java index 586b37846cf..7ee17c558f7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java @@ -32,6 +32,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; /** @@ -139,7 +140,7 @@ public abstract class AbstractAllocationDecision implements ToXContent, Writeabl public XContentBuilder nodeDecisionsToXContent(List nodeDecisions, XContentBuilder builder, Params params) throws IOException { - if (nodeDecisions != null) { + if (nodeDecisions != null && nodeDecisions.isEmpty() == false) { builder.startArray("node_allocation_decisions"); { for (NodeAllocationResult explanation : nodeDecisions) { @@ -166,4 +167,21 @@ public abstract class AbstractAllocationDecision implements ToXContent, Writeabl return false; } + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || other instanceof AbstractAllocationDecision == false) { + return false; + } + @SuppressWarnings("unchecked") AbstractAllocationDecision that = (AbstractAllocationDecision) other; + return Objects.equals(targetNode, that.targetNode) && Objects.equals(nodeDecisions, that.nodeDecisions); + } + + @Override + public int hashCode() { + return Objects.hash(targetNode, nodeDecisions); + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java index 1100d4d70d3..bfbc3e59f7f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java @@ -174,7 +174,7 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision { @Override public boolean isDecisionTaken() { - return this != NOT_TAKEN; + return allocationStatus != AllocationStatus.NO_ATTEMPT; } /** @@ -238,35 +238,34 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision { @Override public String getExplanation() { checkDecisionState(); - String explanation; - if 
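The selection rules implemented by `findShardToExplain` are driven entirely by the request: an empty request explains the first unassigned shard it finds, while the four-argument form pins down a specific copy. The constructors below are the ones exercised by the new unit tests later in this patch; the index and node names are placeholders:

[source,java]
--------------------------------------------------
// Explain whichever unassigned shard is found first (useAnyUnassignedShard() == true);
// throws IllegalStateException if every shard is assigned.
ClusterAllocationExplainRequest anyUnassigned = new ClusterAllocationExplainRequest();

// Explain the primary of shard 0 of index "idx".
ClusterAllocationExplainRequest primary =
    new ClusterAllocationExplainRequest("idx", 0, true, null);

// Explain the replica copy of shard 0 assigned to a particular node; throws
// IllegalStateException if no replica copy lives on that node.
ClusterAllocationExplainRequest replicaOnNode =
    new ClusterAllocationExplainRequest("idx", 0, false, "node-1");
--------------------------------------------------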
(allocationStatus == null) { - explanation = "can allocate the shard"; - } else if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) { - explanation = "allocation temporarily throttled"; - } else if (allocationStatus == AllocationStatus.FETCHING_SHARD_DATA) { - explanation = "cannot allocate because information about existing shard data is still being retrieved from " + - "some of the nodes"; - } else if (allocationStatus == AllocationStatus.NO_VALID_SHARD_COPY) { - if (getNodeDecisions() != null && getNodeDecisions().size() > 0) { - explanation = "cannot allocate because all existing copies of the shard are unreadable"; + AllocationDecision allocationDecision = getAllocationDecision(); + if (allocationDecision == AllocationDecision.YES) { + return "can allocate the shard"; + } else if (allocationDecision == AllocationDecision.THROTTLED) { + return "allocation temporarily throttled"; + } else if (allocationDecision == AllocationDecision.AWAITING_INFO) { + return "cannot allocate because information about existing shard data is still being retrieved from some of the nodes"; + } else if (allocationDecision == AllocationDecision.NO_VALID_SHARD_COPY) { + if (getNodeDecisions() != null && getNodeDecisions().isEmpty() == false) { + return "cannot allocate because all found copies of the shard are either stale or corrupt"; } else { - explanation = "cannot allocate because a previous copy of the shard existed but could not be found"; + return "cannot allocate because a previous copy of the primary shard existed but can no longer be found on " + + "the nodes in the cluster"; } - } else if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) { - explanation = "cannot allocate because the cluster is still waiting " + + } else if (allocationDecision == AllocationDecision.ALLOCATION_DELAYED) { + return "cannot allocate because the cluster is still waiting " + TimeValue.timeValueMillis(remainingDelayInMillis) + " for the departed node holding a replica to rejoin" + (atLeastOneNodeWithYesDecision() ? 
", despite being allowed to allocate the shard to at least one other node" : ""); } else { - assert allocationStatus == AllocationStatus.DECIDERS_NO; + assert allocationDecision == AllocationDecision.NO; if (reuseStore) { - explanation = "cannot allocate because allocation is not permitted to any of the nodes that hold an in-sync shard copy"; + return "cannot allocate because allocation is not permitted to any of the nodes that hold an in-sync shard copy"; } else { - explanation = "cannot allocate because allocation is not permitted to any of the nodes"; + return "cannot allocate because allocation is not permitted to any of the nodes"; } } - return explanation; } @Override @@ -300,4 +299,26 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision { out.writeVLong(configuredDelayInMillis); } + @Override + public boolean equals(Object other) { + if (super.equals(other) == false) { + return false; + } + if (other instanceof AllocateUnassignedDecision == false) { + return false; + } + @SuppressWarnings("unchecked") AllocateUnassignedDecision that = (AllocateUnassignedDecision) other; + return Objects.equals(allocationStatus, that.allocationStatus) + && Objects.equals(allocationId, that.allocationId) + && reuseStore == that.reuseStore + && configuredDelayInMillis == that.configuredDelayInMillis + && remainingDelayInMillis == that.remainingDelayInMillis; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(allocationStatus, allocationId, reuseStore, + configuredDelayInMillis, remainingDelayInMillis); + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java index 5a49cfb142e..0fe9549635e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationDecision.java @@ -40,7 +40,7 @@ public enum AllocationDecision implements Writeable { /** * The allocation attempt was throttled for the shard. */ - THROTTLE((byte) 1), + THROTTLED((byte) 1), /** * The shard cannot be allocated, which can happen for any number of reasons, * including the allocation deciders gave a NO decision for allocating. @@ -56,12 +56,12 @@ public enum AllocationDecision implements Writeable { * Waiting on getting shard data from all nodes before making a decision * about where to allocate the shard. */ - FETCH_PENDING((byte) 4), + AWAITING_INFO((byte) 4), /** * The allocation decision has been delayed waiting for a replica with a shard copy * that left the cluster to rejoin. */ - DELAYED_ALLOCATION((byte) 5), + ALLOCATION_DELAYED((byte) 5), /** * The shard was denied allocation because there were no valid shard copies * found for it amongst the nodes in the cluster. 
@@ -90,15 +90,15 @@ public enum AllocationDecision implements Writeable { case 0: return YES; case 1: - return THROTTLE; + return THROTTLED; case 2: return NO; case 3: return WORSE_BALANCE; case 4: - return FETCH_PENDING; + return AWAITING_INFO; case 5: - return DELAYED_ALLOCATION; + return ALLOCATION_DELAYED; case 6: return NO_VALID_SHARD_COPY; case 7: @@ -117,11 +117,11 @@ public enum AllocationDecision implements Writeable { } else { switch (allocationStatus) { case DECIDERS_THROTTLED: - return THROTTLE; + return THROTTLED; case FETCHING_SHARD_DATA: - return FETCH_PENDING; + return AWAITING_INFO; case DELAYED_ALLOCATION: - return DELAYED_ALLOCATION; + return ALLOCATION_DELAYED; case NO_VALID_SHARD_COPY: return NO_VALID_SHARD_COPY; case NO_ATTEMPT: @@ -141,7 +141,7 @@ public enum AllocationDecision implements Writeable { case YES: return YES; case THROTTLE: - return THROTTLE; + return THROTTLED; default: assert type == Decision.Type.NO; return NO; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java index 676e6107cb2..de9795ff4c2 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.List; +import java.util.Objects; /** * Represents a decision to move a started shard, either because it is no longer allowed to remain on its current node @@ -48,15 +49,15 @@ public final class MoveDecision extends AbstractAllocationDecision { @Nullable private final Decision canRemainDecision; @Nullable - private final Decision canRebalanceDecision; + private final Decision clusterRebalanceDecision; private final int currentNodeRanking; - private MoveDecision(Decision canRemainDecision, Decision canRebalanceDecision, AllocationDecision allocationDecision, + private MoveDecision(Decision canRemainDecision, Decision clusterRebalanceDecision, AllocationDecision allocationDecision, DiscoveryNode assignedNode, List nodeDecisions, int currentNodeRanking) { super(assignedNode, nodeDecisions); this.allocationDecision = allocationDecision; this.canRemainDecision = canRemainDecision; - this.canRebalanceDecision = canRebalanceDecision; + this.clusterRebalanceDecision = clusterRebalanceDecision; this.currentNodeRanking = currentNodeRanking; } @@ -64,7 +65,7 @@ public final class MoveDecision extends AbstractAllocationDecision { super(in); allocationDecision = in.readOptionalWriteable(AllocationDecision::readFrom); canRemainDecision = in.readOptionalWriteable(Decision::readFrom); - canRebalanceDecision = in.readOptionalWriteable(Decision::readFrom); + clusterRebalanceDecision = in.readOptionalWriteable(Decision::readFrom); currentNodeRanking = in.readVInt(); } @@ -73,7 +74,7 @@ public final class MoveDecision extends AbstractAllocationDecision { super.writeTo(out); out.writeOptionalWriteable(allocationDecision); out.writeOptionalWriteable(canRemainDecision); - out.writeOptionalWriteable(canRebalanceDecision); + out.writeOptionalWriteable(clusterRebalanceDecision); out.writeVInt(currentNodeRanking); } @@ -131,7 +132,15 @@ public final class MoveDecision extends AbstractAllocationDecision { @Override public boolean isDecisionTaken() { - return this != NOT_TAKEN; + return canRemainDecision != null || clusterRebalanceDecision != null; + } + + /** + * 
Creates a new move decision from this decision, plus adding a remain decision. + */ + public MoveDecision withRemainDecision(Decision canRemainDecision) { + return new MoveDecision(canRemainDecision, clusterRebalanceDecision, allocationDecision, + targetNode, nodeDecisions, currentNodeRanking); } /** @@ -164,13 +173,13 @@ public final class MoveDecision extends AbstractAllocationDecision { /** * Returns {@code true} if the shard is allowed to be rebalanced to another node in the cluster, - * returns {@code false} otherwise. If {@link #getCanRebalanceDecision()} returns {@code null}, then + * returns {@code false} otherwise. If {@link #getClusterRebalanceDecision()} returns {@code null}, then * the result of this method is meaningless, as no rebalance decision was taken. If {@link #isDecisionTaken()} * returns {@code false}, then invoking this method will throw an {@code IllegalStateException}. */ - public boolean canRebalance() { + public boolean canRebalanceCluster() { checkDecisionState(); - return canRebalanceDecision.type() == Type.YES; + return clusterRebalanceDecision != null && clusterRebalanceDecision.type() == Type.YES; } /** @@ -182,9 +191,9 @@ public final class MoveDecision extends AbstractAllocationDecision { * {@code IllegalStateException}. */ @Nullable - public Decision getCanRebalanceDecision() { + public Decision getClusterRebalanceDecision() { checkDecisionState(); - return canRebalanceDecision; + return clusterRebalanceDecision; } /** @@ -199,7 +208,7 @@ public final class MoveDecision extends AbstractAllocationDecision { /** * Gets the current ranking of the node to which the shard is currently assigned, relative to the * other nodes in the cluster as reported in {@link NodeAllocationResult#getWeightRanking()}. The - * ranking will only return a meaningful positive integer if {@link #getCanRebalanceDecision()} returns + * ranking will only return a meaningful positive integer if {@link #getClusterRebalanceDecision()} returns * a non-null value; otherwise, 0 will be returned. If {@link #isDecisionTaken()} returns * {@code false}, then invoking this method will throw an {@code IllegalStateException}. */ @@ -212,18 +221,19 @@ public final class MoveDecision extends AbstractAllocationDecision { public String getExplanation() { checkDecisionState(); String explanation; - if (canRebalanceDecision != null) { + if (clusterRebalanceDecision != null) { // it was a decision to rebalance the shard, because the shard was allowed to remain on its current node - if (allocationDecision == AllocationDecision.FETCH_PENDING) { + if (allocationDecision == AllocationDecision.AWAITING_INFO) { explanation = "cannot rebalance as information about existing copies of this shard in the cluster is still being gathered"; - } else if (canRebalanceDecision.type() == Type.NO) { - explanation = "rebalancing is not allowed on the cluster" + (atLeastOneNodeWithYesDecision() ? ", even though there " + + } else if (clusterRebalanceDecision.type() == Type.NO) { + explanation = "rebalancing is not allowed" + (atLeastOneNodeWithYesDecision() ? 
", even though there " + "is at least one node on which the shard can be allocated" : ""); - } else if (canRebalanceDecision.type() == Type.THROTTLE) { + } else if (clusterRebalanceDecision.type() == Type.THROTTLE) { explanation = "rebalancing is throttled"; } else { + assert clusterRebalanceDecision.type() == Type.YES; if (getTargetNode() != null) { - if (allocationDecision == AllocationDecision.THROTTLE) { + if (allocationDecision == AllocationDecision.THROTTLED) { explanation = "shard rebalancing throttled"; } else { explanation = "can rebalance shard"; @@ -235,11 +245,10 @@ public final class MoveDecision extends AbstractAllocationDecision { } } else { // it was a decision to force move the shard - if (canRemain()) { - explanation = "shard can remain on its current node"; - } else if (allocationDecision == AllocationDecision.YES) { + assert canRemain() == false; + if (allocationDecision == AllocationDecision.YES) { explanation = "shard cannot remain on this node and is force-moved to another node"; - } else if (allocationDecision == AllocationDecision.THROTTLE) { + } else if (allocationDecision == AllocationDecision.THROTTLED) { explanation = "shard cannot remain on this node but is throttled on moving to another node"; } else { assert allocationDecision == AllocationDecision.NO; @@ -263,23 +272,44 @@ public final class MoveDecision extends AbstractAllocationDecision { canRemainDecision.toXContent(builder, params); builder.endArray(); } - if (canRebalanceDecision != null) { - AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(canRebalanceDecision.type()); + if (clusterRebalanceDecision != null) { + AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type()); builder.field("can_rebalance_cluster", rebalanceDecision); - if (rebalanceDecision != AllocationDecision.YES && canRebalanceDecision.getDecisions().isEmpty() == false) { + if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) { builder.startArray("can_rebalance_cluster_decisions"); - canRebalanceDecision.toXContent(builder, params); + clusterRebalanceDecision.toXContent(builder, params); builder.endArray(); } } - if (canRebalanceDecision != null) { + if (clusterRebalanceDecision != null) { builder.field("can_rebalance_to_other_node", allocationDecision); + builder.field("rebalance_explanation", getExplanation()); } else { builder.field("can_move_to_other_node", forceMove() ? "yes" : "no"); + builder.field("move_explanation", getExplanation()); } - builder.field(canRebalanceDecision != null ? 
"rebalance_explanation" : "move_explanation", getExplanation()); nodeDecisionsToXContent(nodeDecisions, builder, params); return builder; } + @Override + public boolean equals(Object other) { + if (super.equals(other) == false) { + return false; + } + if (other instanceof MoveDecision == false) { + return false; + } + @SuppressWarnings("unchecked") MoveDecision that = (MoveDecision) other; + return Objects.equals(allocationDecision, that.allocationDecision) + && Objects.equals(canRemainDecision, that.canRemainDecision) + && Objects.equals(clusterRebalanceDecision, that.clusterRebalanceDecision) + && currentNodeRanking == that.currentNodeRanking; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(allocationDecision, canRemainDecision, clusterRebalanceDecision, currentNodeRanking); + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 1899ea1cc55..e1ae367bebf 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -68,7 +68,7 @@ public class RoutingAllocation { private final boolean retryFailed; - private boolean debugDecision = false; + private DebugMode debugDecision = DebugMode.OFF; private boolean hasPendingAsyncFetch = false; @@ -167,11 +167,19 @@ public class RoutingAllocation { return this.ignoreDisable; } - public void debugDecision(boolean debug) { + public void setDebugMode(DebugMode debug) { this.debugDecision = debug; } + public void debugDecision(boolean debug) { + this.debugDecision = debug ? DebugMode.ON : DebugMode.OFF; + } + public boolean debugDecision() { + return this.debugDecision != DebugMode.OFF; + } + + public DebugMode getDebugMode() { return this.debugDecision; } @@ -280,4 +288,20 @@ public class RoutingAllocation { public boolean isRetryFailed() { return retryFailed; } + + public enum DebugMode { + /** + * debug mode is off + */ + OFF, + /** + * debug mode is on + */ + ON, + /** + * debug mode is on, but YES decisions from a {@link org.elasticsearch.cluster.routing.allocation.decider.Decision.Multi} + * are not included. + */ + EXCLUDE_YES_DECISIONS + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java new file mode 100644 index 00000000000..557ce9300c6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Represents the decision taken for the allocation of a single shard. If + * the shard is unassigned, {@link #getAllocateDecision()} will return an + * object containing the decision and its explanation, and {@link #getMoveDecision()} + * will return an object for which {@link MoveDecision#isDecisionTaken()} returns + * {@code false}. If the shard is in the started state, then {@link #getMoveDecision()} + * will return an object containing the decision to move/rebalance the shard, and + * {@link #getAllocateDecision()} will return an object for which + * {@link AllocateUnassignedDecision#isDecisionTaken()} returns {@code false}. If + * the shard is neither unassigned nor started (i.e. it is initializing or relocating), + * then both {@link #getAllocateDecision()} and {@link #getMoveDecision()} will return + * objects whose {@code isDecisionTaken()} method returns {@code false}. + */ +public final class ShardAllocationDecision implements ToXContent, Writeable { + public static final ShardAllocationDecision NOT_TAKEN = + new ShardAllocationDecision(AllocateUnassignedDecision.NOT_TAKEN, MoveDecision.NOT_TAKEN); + + private final AllocateUnassignedDecision allocateDecision; + private final MoveDecision moveDecision; + + public ShardAllocationDecision(AllocateUnassignedDecision allocateDecision, + MoveDecision moveDecision) { + this.allocateDecision = allocateDecision; + this.moveDecision = moveDecision; + } + + public ShardAllocationDecision(StreamInput in) throws IOException { + allocateDecision = new AllocateUnassignedDecision(in); + moveDecision = new MoveDecision(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + allocateDecision.writeTo(out); + moveDecision.writeTo(out); + } + + /** + * Returns {@code true} if either an allocation decision or a move decision was taken + * for the shard. If no decision was taken, as in the case of initializing or relocating + * shards, then this method returns {@code false}. + */ + public boolean isDecisionTaken() { + return allocateDecision.isDecisionTaken() || moveDecision.isDecisionTaken(); + } + + /** + * Gets the unassigned allocation decision for the shard. If the shard was not in the unassigned state, + * the instance of {@link AllocateUnassignedDecision} that is returned will have {@link AllocateUnassignedDecision#isDecisionTaken()} + * return {@code false}. + */ + public AllocateUnassignedDecision getAllocateDecision() { + return allocateDecision; + } + + /** + * Gets the move decision for the shard. If the shard was not in the started state, + * the instance of {@link MoveDecision} that is returned will have {@link MoveDecision#isDecisionTaken()} + * return {@code false}. 
+ */ + public MoveDecision getMoveDecision() { + return moveDecision; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (allocateDecision.isDecisionTaken()) { + allocateDecision.toXContent(builder, params); + } + if (moveDecision.isDecisionTaken()) { + moveDecision.toXContent(builder, params); + } + return builder; + } + +} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 46a2a2524cb..9943aaccae9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationDecision; import org.elasticsearch.cluster.routing.allocation.MoveDecision; import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; @@ -46,6 +47,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.PriorityComparator; import java.util.ArrayList; @@ -110,12 +112,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards this.threshold = threshold; } - @Override - public Map weighShard(RoutingAllocation allocation, ShardRouting shard) { - final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.weighShard(shard); - } - @Override public void allocate(RoutingAllocation allocation) { if (allocation.routingNodes().size() == 0) { @@ -128,16 +124,21 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards balancer.balance(); } - /** - * Returns a decision on rebalancing a single shard to form a more optimal cluster balance. This - * method is not used in itself for cluster rebalancing because all shards from all indices are - * taken into account when making rebalancing decisions. This method is only intended to be used - * from the cluster allocation explain API to explain possible rebalancing decisions for a single - * shard. 
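Given the contract documented above, a caller can branch on which sub-decision was actually taken; a minimal sketch:

[source,java]
--------------------------------------------------
// Sketch: summarize a ShardAllocationDecision according to its documented contract.
static String summarize(ShardAllocationDecision decision) {
    if (decision.isDecisionTaken() == false) {
        // initializing or relocating shard: neither sub-decision applies
        return "no decision taken";
    } else if (decision.getAllocateDecision().isDecisionTaken()) {
        // the shard was unassigned
        return decision.getAllocateDecision().getExplanation();
    } else {
        // the shard was started
        return decision.getMoveDecision().getExplanation();
    }
}
--------------------------------------------------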
- */ - public MoveDecision decideRebalance(final ShardRouting shard, final RoutingAllocation allocation) { - assert allocation.debugDecision() : "debugDecision should be set in explain mode"; - return new Balancer(logger, allocation, weightFunction, threshold).decideRebalance(shard); + @Override + public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) { + Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); + AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; + MoveDecision moveDecision = MoveDecision.NOT_TAKEN; + if (shard.unassigned()) { + allocateUnassignedDecision = balancer.decideAllocateUnassigned(shard, Sets.newHashSet()); + } else { + moveDecision = balancer.decideMove(shard); + if (moveDecision.isDecisionTaken() && moveDecision.canRemain()) { + MoveDecision rebalanceDecision = balancer.decideRebalance(shard); + moveDecision = rebalanceDecision.withRemainDecision(moveDecision.getCanRemainDecision()); + } + } + return new ShardAllocationDecision(allocateUnassignedDecision, moveDecision); } /** @@ -337,7 +338,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards */ private MoveDecision decideRebalance(final ShardRouting shard) { if (shard.started() == false) { - // cannot rebalance a shard that isn't started + // we can only rebalance started shards return MoveDecision.NOT_TAKEN; } @@ -437,7 +438,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } if (canRebalance.type() != Type.YES || allocation.hasPendingAsyncFetch()) { - AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() ? AllocationDecision.FETCH_PENDING : + AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() ? AllocationDecision.AWAITING_INFO : AllocationDecision.fromDecisionType(canRebalance.type()); return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions); } else { @@ -644,7 +645,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards // offloading the shards. for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) { ShardRouting shardRouting = it.next(); - final MoveDecision moveDecision = makeMoveDecision(shardRouting); + final MoveDecision moveDecision = decideMove(shardRouting); if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) { final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); @@ -673,7 +674,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * 4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then * {@link MoveDecision#nodeDecisions} will have a non-null value. 
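The composition above is worth spelling out: for a started shard the balancer first decides whether the shard may remain on its node, and only if it may does it go on to ask the rebalance question, folding the remain decision back in through `withRemainDecision`. Reading the combined result looks roughly like this:

[source,java]
--------------------------------------------------
// Sketch: interpreting the MoveDecision produced by decideShardAllocation
// for a started shard.
static void describeStartedShard(MoveDecision move) {
    if (move.isDecisionTaken() == false) {
        return; // the shard was not started, so nothing was decided
    }
    if (move.canRemain() && move.canRebalanceCluster()) {
        // allowed to stay, and cluster-level rebalancing is permitted;
        // getTargetNode() is non-null if a better-balanced node was found
    } else if (move.canRemain() == false && move.forceMove()) {
        // deciders vetoed the current node and a target exists to force-move to
    }
}
--------------------------------------------------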
*/ - public MoveDecision makeMoveDecision(final ShardRouting shardRouting) { + public MoveDecision decideMove(final ShardRouting shardRouting) { if (shardRouting.started() == false) { // we can only move started shards return MoveDecision.NOT_TAKEN; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java index 35f3b265418..7e9d15b4528 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java @@ -19,11 +19,12 @@ package org.elasticsearch.cluster.routing.allocation.allocator; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.MoveDecision; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; -import java.util.Map; /** *

* A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster. @@ -44,13 +45,17 @@ public interface ShardsAllocator { void allocate(RoutingAllocation allocation); /** - * Returns a map of node to a float "weight" of where the allocator would like to place the shard. - * Higher weights signify greater desire to place the shard on that node. - * Does not modify the allocation at all. + * Returns the decision for where a shard should reside in the cluster. If the shard is unassigned, + * then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned + * state, then the {@link MoveDecision} will be non-null. * - * @param allocation current node allocation - * @param shard shard to weigh - * @return map of nodes to float weights + * This method is primarily used by the cluster allocation explain API to provide detailed explanations + * for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method + * may use the results of this method implementation to decide on allocating shards in the routing table + * to the cluster. + * + * If an implementation of this interface does not support explaining decisions for a single shard through + * the cluster explain API, then this method should throw a {@code UnsupportedOperationException}. */ - Map weighShard(RoutingAllocation allocation, ShardRouting shard); + ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index 986613e5a42..53e67ba25a4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -23,13 +23,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import java.util.Collection; import java.util.Collections; -import java.util.List; -import java.util.Set; + +import static org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS; /** * A composite {@link AllocationDecider} combining the "decision" of multiple @@ -56,7 +55,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } @@ -82,7 +82,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { // the assumption is that a decider that returns the static instance Decision#ALWAYS // does not really implements canAllocate ret.add(decision); @@ -112,7 +113,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else 
if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } @@ -131,7 +133,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } @@ -150,7 +153,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } @@ -169,7 +173,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } @@ -188,7 +193,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } @@ -216,7 +222,8 @@ public class AllocationDeciders extends AllocationDecider { } else { ret.add(decision); } - } else if (decision != Decision.ALWAYS) { + } else if (decision != Decision.ALWAYS + && (allocation.getDebugMode() != EXCLUDE_YES_DECISIONS || decision.type() != Decision.Type.YES)) { ret.add(decision); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index 34f612f6ac9..a2198ad90d9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -140,6 +140,12 @@ public abstract class Decision implements ToXContent, Writeable { @Nullable public abstract String label(); + /** + * Get the explanation for this decision. + */ + @Nullable + public abstract String getExplanation(); + /** * Return the list of all decisions that make up this decision */ @@ -200,6 +206,7 @@ public abstract class Decision implements ToXContent, Writeable { /** * Returns the explanation string, fully formatted. Only formats the string once. 
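`getExplanation` is now part of the base `Decision` contract, but with an asymmetry: `Decision.Single` lazily formats and caches its message, while the `Decision.Multi` override just below throws, since a composite has no single explanation. Callers should therefore walk the leaves; a sketch, assuming one level of nesting, which is how the deciders above assemble their composites:

[source,java]
--------------------------------------------------
// Sketch: collect leaf explanations rather than calling getExplanation() on a
// Decision.Multi, which throws UnsupportedOperationException.
static List<String> leafExplanations(Decision.Multi decision) {
    List<String> explanations = new ArrayList<>();
    for (Decision leaf : decision.getDecisions()) {
        String explanation = leaf.getExplanation();
        if (explanation != null) { // label-only decisions may carry no explanation
            explanations.add(explanation);
        }
    }
    return explanations;
}
--------------------------------------------------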
*/ + @Override @Nullable public String getExplanation() { if (explanationString == null && explanation != null) { @@ -301,6 +308,12 @@ public abstract class Decision implements ToXContent, Writeable { return null; } + @Override + @Nullable + public String getExplanation() { + throw new UnsupportedOperationException("multi-level decisions do not have an explanation"); + } + @Override public List getDecisions() { return Collections.unmodifiableList(this.decisions); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 5a140c51936..7bb073a4c45 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -178,10 +178,12 @@ public class EnableAllocationDecider extends AllocationDecider { } private static String setting(Allocation allocation, boolean usedIndexSetting) { - StringBuilder buf = new StringBuilder("["); + StringBuilder buf = new StringBuilder(); if (usedIndexSetting) { + buf.append("index setting ["); buf.append(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()); } else { + buf.append("cluster setting ["); buf.append(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()); } buf.append("=").append(allocation.toString().toLowerCase(Locale.ROOT)).append("]"); diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 605fe20a33a..56f8b71ee58 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; @@ -57,24 +58,6 @@ public class GatewayAllocator extends AbstractComponent { this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction); } - /** - * Returns true if the given shard has an async fetch pending - */ - public boolean hasFetchPending(ShardId shardId, boolean primary) { - if (primary) { - AsyncShardFetch fetch = asyncFetchStarted.get(shardId); - if (fetch != null) { - return fetch.getNumberOfInFlightFetches() > 0; - } - } else { - AsyncShardFetch fetch = asyncFetchStore.get(shardId); - if (fetch != null) { - return fetch.getNumberOfInFlightFetches() > 0; - } - } - return false; - } - public void setReallocation(final ClusterService clusterService, final RoutingService routingService) { this.routingService = routingService; clusterService.addStateApplier(event -> { @@ -137,6 +120,18 @@ public class GatewayAllocator extends AbstractComponent { replicaShardAllocator.allocateUnassigned(allocation); } + /** + * Computes and returns the design for allocating a single unassigned shard. If called on an assigned shard, + * {@link AllocateUnassignedDecision#NOT_TAKEN} is returned. 
+ */ + public AllocateUnassignedDecision decideUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) { + if (unassignedShard.primary()) { + return primaryShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger); + } else { + return replicaShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger); + } + } + class InternalAsyncFetch extends AsyncShardFetch { public InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister, T> action) { @@ -161,11 +156,8 @@ public class GatewayAllocator extends AbstractComponent { @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { - AsyncShardFetch fetch = asyncFetchStarted.get(shard.shardId()); - if (fetch == null) { - fetch = new InternalAsyncFetch<>(logger, "shard_started", shard.shardId(), startedAction); - asyncFetchStarted.put(shard.shardId(), fetch); - } + AsyncShardFetch fetch = + asyncFetchStarted.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, startedAction)); AsyncShardFetch.FetchResult shardState = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); @@ -187,11 +179,8 @@ public class GatewayAllocator extends AbstractComponent { @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { - AsyncShardFetch fetch = asyncFetchStore.get(shard.shardId()); - if (fetch == null) { - fetch = new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction); - asyncFetchStore.put(shard.shardId(), fetch); - } + AsyncShardFetch fetch = + asyncFetchStore.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction)); AsyncShardFetch.FetchResult shardStores = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); if (shardStores.hasData()) { diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 2200ed1b4f2..45292d43c8e 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -183,7 +183,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { // this shard will be picked up when the node joins and we do another allocation reroute logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound); - return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, null); + return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, explain ? 
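With `hasFetchPending` gone, the gateway allocator now answers explain queries directly through `decideUnassignedShardAllocation`, dispatching to the primary or replica shard allocator as appropriate. A sketch of consuming it:

[source,java]
--------------------------------------------------
static boolean hasNoValidCopy(GatewayAllocator gatewayAllocator,
                              ShardRouting unassignedShard,
                              RoutingAllocation allocation) {
    AllocateUnassignedDecision decision =
        gatewayAllocator.decideUnassignedShardAllocation(unassignedShard, allocation);
    // NO_VALID_SHARD_COPY means every found copy was stale, corrupt, or missing.
    return decision.isDecisionTaken()
        && decision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY;
}
--------------------------------------------------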
new ArrayList<>() : null); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index 2f60c448e7c..a5309e7e746 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -70,10 +70,12 @@ public class RestClusterAllocationExplainAction extends BaseRestHandler { try { req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); + final boolean humanReadable = request.paramAsBoolean("human", false); return channel -> client.admin().cluster().allocationExplain(req, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws Exception { + builder.humanReadable(humanReadable); response.getExplanation().toXContent(builder, ToXContent.EMPTY_PARAMS); return new BytesRestResponse(RestStatus.OK, builder); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java new file mode 100644 index 00000000000..0f8f71a3af8 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -0,0 +1,170 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.allocation; + +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.gateway.TestGatewayAllocator; + +import java.util.Collections; +import java.util.Locale; + +import static org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction.findShardToExplain; + +/** + * Tests for the {@link TransportClusterAllocationExplainAction} class. + */ +public class ClusterAllocationExplainActionTests extends ESTestCase { + + private static final AllocationDeciders NOOP_DECIDERS = new AllocationDeciders(Settings.EMPTY, Collections.emptyList()); + + public void testInitializingOrRelocatingShardExplanation() throws Exception { + ShardRoutingState shardRoutingState = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.RELOCATING); + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), shardRoutingState); + ShardRouting shard = clusterState.getRoutingTable().index("idx").shard(0).primaryShard(); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), + clusterState.getRoutingNodes(), clusterState, null, System.nanoTime(), randomBoolean()); + ClusterAllocationExplanation cae = TransportClusterAllocationExplainAction.explainShard(shard, allocation, null, randomBoolean(), + new TestGatewayAllocator(), new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + // no-op + } + + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + if (shard.initializing() || shard.relocating()) { + return ShardAllocationDecision.NOT_TAKEN; + } else { + throw new UnsupportedOperationException("cannot explain"); + } + } + }); + + assertEquals(shard.currentNodeId(), cae.getCurrentNode().getId()); + assertFalse(cae.getShardAllocationDecision().isDecisionTaken()); + assertFalse(cae.getShardAllocationDecision().getAllocateDecision().isDecisionTaken()); + assertFalse(cae.getShardAllocationDecision().getMoveDecision().isDecisionTaken()); + XContentBuilder builder = XContentFactory.jsonBuilder(); + cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + String explanation; + if (shardRoutingState == ShardRoutingState.RELOCATING) { + explanation = "the shard is in the process of relocating from node [] to node [], wait until " + + "relocation has completed"; + } else { + explanation = "the shard is in the process of initializing on node [], " + + "wait until initialization has completed"; + } + assertEquals("{\"index\":\"idx\",\"shard\":0,\"primary\":true,\"current_state\":\"" + + 
shardRoutingState.toString().toLowerCase(Locale.ROOT) + "\",\"current_node\":" + + "{\"id\":\"" + cae.getCurrentNode().getId() + "\",\"name\":\"" + cae.getCurrentNode().getName() + + "\",\"transport_address\":\"" + cae.getCurrentNode().getAddress() + + "\"},\"explanation\":\"" + explanation + "\"}", builder.string()); + } + + public void testFindAnyUnassignedShardToExplain() { + // find unassigned primary + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.UNASSIGNED); + ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest(); + ShardRouting shard = findShardToExplain(request, routingAllocation(clusterState)); + assertEquals(clusterState.getRoutingTable().index("idx").shard(0).primaryShard(), shard); + + // find unassigned replica + clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED, ShardRoutingState.UNASSIGNED); + request = new ClusterAllocationExplainRequest(); + shard = findShardToExplain(request, routingAllocation(clusterState)); + assertEquals(clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0), shard); + + // no unassigned shard to explain + final ClusterState allStartedClusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), + ShardRoutingState.STARTED, ShardRoutingState.STARTED); + final ClusterAllocationExplainRequest anyUnassignedShardsRequest = new ClusterAllocationExplainRequest(); + expectThrows(IllegalStateException.class, () -> + findShardToExplain(anyUnassignedShardsRequest, routingAllocation(allStartedClusterState))); + } + + public void testFindPrimaryShardToExplain() { + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), randomFrom(ShardRoutingState.values())); + ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, true, null); + ShardRouting shard = findShardToExplain(request, routingAllocation(clusterState)); + assertEquals(clusterState.getRoutingTable().index("idx").shard(0).primaryShard(), shard); + } + + public void testFindAnyReplicaToExplain() { + // prefer unassigned replicas to started replicas + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED, + ShardRoutingState.STARTED, ShardRoutingState.UNASSIGNED); + ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, false, null); + ShardRouting shard = findShardToExplain(request, routingAllocation(clusterState)); + assertEquals(clusterState.getRoutingTable().index("idx").shard(0).replicaShards() + .stream().filter(ShardRouting::unassigned).findFirst().get(), shard); + + // prefer started replicas to initializing/relocating replicas + clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED, + randomFrom(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING), ShardRoutingState.STARTED); + request = new ClusterAllocationExplainRequest("idx", 0, false, null); + shard = findShardToExplain(request, routingAllocation(clusterState)); + assertEquals(clusterState.getRoutingTable().index("idx").shard(0).replicaShards() + .stream().filter(ShardRouting::started).findFirst().get(), shard); + } + + public void testFindShardAssignedToNode() { + // find shard with given node + final boolean primary = randomBoolean(); + ShardRoutingState[] replicaStates = new ShardRoutingState[0]; + if (primary == false) { + replicaStates = new ShardRoutingState[] { 
ShardRoutingState.STARTED }; + } + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED, replicaStates); + ShardRouting shardToExplain = primary ? clusterState.getRoutingTable().index("idx").shard(0).primaryShard() : + clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0); + ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("idx", 0, primary, shardToExplain.currentNodeId()); + RoutingAllocation allocation = routingAllocation(clusterState); + ShardRouting foundShard = findShardToExplain(request, allocation); + assertEquals(shardToExplain, foundShard); + + // shard is not assigned to given node + String explainNode = null; + for (RoutingNode routingNode : clusterState.getRoutingNodes()) { + if (routingNode.nodeId().equals(shardToExplain.currentNodeId()) == false) { + explainNode = routingNode.nodeId(); + break; + } + } + final ClusterAllocationExplainRequest failingRequest = new ClusterAllocationExplainRequest("idx", 0, primary, explainNode); + expectThrows(IllegalStateException.class, () -> findShardToExplain(failingRequest, allocation)); + } + + private static RoutingAllocation routingAllocation(ClusterState clusterState) { + return new RoutingAllocation(NOOP_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, System.nanoTime(), randomBoolean()); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index f6ba7d9f022..f4ce1ec1baa 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -19,150 +19,1246 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.UnassignedInfo.Reason; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationDecision; +import org.elasticsearch.cluster.routing.allocation.MoveDecision; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; +import 
org.elasticsearch.test.InternalTestCluster; +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Set; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.isOneOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.startsWith; /** * Tests for the cluster allocation explanation */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public final class ClusterAllocationExplainIT extends ESIntegTestCase { - @TestLogging("_root:DEBUG") - public void testDelayShards() throws Exception { + + public void testUnassignedPrimaryWithExistingIndex() throws Exception { + logger.info("--> starting 2 nodes"); + internalCluster().startNodes(2); + + logger.info("--> creating an index with 1 primary, 0 replicas"); + createIndexAndIndexData(1, 0); + + logger.info("--> stopping the node with the primary"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName())); + ensureStableCluster(1); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertTrue(isPrimary); + + // verify current node info + assertNotEquals(ShardRoutingState.STARTED, shardState); + assertNull(currentNode); + + // verify unassigned info + assertNotNull(unassignedInfo); + assertEquals(Reason.NODE_LEFT, unassignedInfo.getReason()); + assertTrue(unassignedInfo.getLastAllocationStatus() == AllocationStatus.FETCHING_SHARD_DATA + || unassignedInfo.getLastAllocationStatus() == AllocationStatus.NO_VALID_SHARD_COPY); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 1); + + // verify decision objects + assertTrue(allocateDecision.isDecisionTaken()); + assertFalse(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO_VALID_SHARD_COPY, allocateDecision.getAllocationDecision()); + assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be " + + "found on the nodes in the cluster", allocateDecision.getExplanation()); + assertNull(allocateDecision.getAllocationId()); + assertNull(allocateDecision.getTargetNode()); + assertEquals(0L, allocateDecision.getConfiguredDelayInMillis()); + assertEquals(0L, allocateDecision.getRemainingDelayInMillis()); + assertEquals(0, allocateDecision.getNodeDecisions().size()); + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + 
verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED); + parser.nextToken(); + assertEquals("can_allocate", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO_VALID_SHARD_COPY.toString(), parser.text()); + parser.nextToken(); + assertEquals("allocate_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be found " + + "on the nodes in the cluster", parser.text()); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testUnassignedReplicaDelayedAllocation() throws Exception { logger.info("--> starting 3 nodes"); internalCluster().startNodes(3); - // Wait for all 3 nodes to be up - logger.info("--> waiting for 3 nodes to be up"); - ensureStableCluster(3); - - logger.info("--> creating 'test' index"); - assertAcked(prepareCreate("test").setSettings(Settings.builder() - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "1m") - .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 5) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)) - .setWaitForActiveShards(ActiveShardCount.ALL).get()); - - logger.info("--> stopping a random node"); - assertTrue(internalCluster().stopRandomDataNode()); - - logger.info("--> waiting for the master to remove the stopped node from the cluster state"); + logger.info("--> creating an index with 1 primary, 1 replica"); + createIndexAndIndexData(1, 1); + logger.info("--> stopping the node with the replica"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode().getName())); ensureStableCluster(2); + assertBusy(() -> + // wait till we have passed any pending shard data fetching + assertEquals(AllocationDecision.ALLOCATION_DELAYED, client().admin().cluster().prepareAllocationExplain() + .setIndex("idx").setShard(0).setPrimary(false).get().getExplanation() + .getShardAllocationDecision().getAllocateDecision().getAllocationDecision()) + ); - ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get(); - ClusterAllocationExplanation cae = resp.getExplanation(); - assertThat(cae.getShard().getIndexName(), equalTo("test")); - assertFalse(cae.isPrimary()); - assertFalse(cae.isAssigned()); - assertThat("expecting a remaining delay, got: " + cae.getRemainingDelayMillis(), cae.getRemainingDelayMillis(), greaterThan(0L)); - } + logger.info("--> observing delayed allocation..."); + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); - public void testUnassignedShards() throws Exception { - logger.info("--> starting 3 nodes"); - String noAttrNode = internalCluster().startNode(); - String barAttrNode = internalCluster().startNode(Settings.builder().put("node.attr.bar", "baz")); - String fooBarAttrNode = internalCluster().startNode(Settings.builder() - .put("node.attr.foo", "bar") - .put("node.attr.bar", "baz")); + ClusterAllocationExplanation explanation = runExplain(false, includeYesDecisions, includeDiskInfo); - // Wait for all 3 nodes to be up - logger.info("--> waiting for 3 nodes to be up"); - client().admin().cluster().health(Requests.clusterHealthRequest().waitForNodes("3")).actionGet(); + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo 
unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); - client().admin().indices().prepareCreate("anywhere") - .setSettings(Settings.builder() - .put("index.number_of_shards", 5) - .put("index.number_of_replicas", 1)) - .setWaitForActiveShards(ActiveShardCount.ALL) // wait on all shards - .get(); + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertFalse(isPrimary); - client().admin().indices().prepareCreate("only-baz") - .setSettings(Settings.builder() - .put("index.routing.allocation.include.bar", "baz") - .put("index.number_of_shards", 5) - .put("index.number_of_replicas", 1)) - .setWaitForActiveShards(ActiveShardCount.ALL) - .get(); + // verify current node info + assertNotEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNull(currentNode); - client().admin().indices().prepareCreate("only-foo") - .setSettings(Settings.builder() - .put("index.routing.allocation.include.foo", "bar") - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1)) - .get(); + // verify unassigned info + assertNotNull(unassignedInfo); + assertEquals(Reason.NODE_LEFT, unassignedInfo.getReason()); + assertEquals(AllocationStatus.NO_ATTEMPT, unassignedInfo.getLastAllocationStatus()); - ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain() - .setIndex("only-foo") - .setShard(0) - .setPrimary(false) - .get(); - ClusterAllocationExplanation cae = resp.getExplanation(); - assertThat(cae.getShard().getIndexName(), equalTo("only-foo")); - assertFalse(cae.isPrimary()); - assertFalse(cae.isAssigned()); - assertFalse(cae.isStillFetchingShardData()); - assertThat(UnassignedInfo.Reason.INDEX_CREATED, equalTo(cae.getUnassignedInfo().getReason())); - assertThat("expecting no remaining delay: " + cae.getRemainingDelayMillis(), cae.getRemainingDelayMillis(), equalTo(0L)); + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 2); - Map<DiscoveryNode, NodeExplanation> explanations = cae.getNodeExplanations(); - - Float barAttrWeight = -1f; - Float fooBarAttrWeight = -1f; - for (Map.Entry<DiscoveryNode, NodeExplanation> entry : explanations.entrySet()) { - DiscoveryNode node = entry.getKey(); - String nodeName = node.getName(); - NodeExplanation explanation = entry.getValue(); - ClusterAllocationExplanation.FinalDecision finalDecision = explanation.getFinalDecision(); - ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy(); - Decision d = explanation.getDecision(); - float weight = explanation.getWeight(); - IndicesShardStoresResponse.StoreStatus storeStatus = explanation.getStoreStatus(); - - assertEquals(d.type(), Decision.Type.NO); - if (noAttrNode.equals(nodeName)) { - assertThat(d.toString(), containsString("node does not match index setting [index.routing.allocation.include] " + - "filters [foo:\"bar\"]")); - assertNull(storeStatus); - assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - explanation.getFinalExplanation()); - assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision); - } else if (barAttrNode.equals(nodeName)) { - assertThat(d.toString(), containsString("node does not match index setting [index.routing.allocation.include] " + - "filters [foo:\"bar\"]")); - barAttrWeight = weight;
- assertNull(storeStatus); - assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - explanation.getFinalExplanation()); - assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision); - } else if (fooBarAttrNode.equals(nodeName)) { - assertThat(d.toString(), containsString("the shard cannot be allocated to the same node")); - fooBarAttrWeight = weight; - assertEquals(storeStatus.getAllocationStatus(), - IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY); - assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision); - assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy); - assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision", - explanation.getFinalExplanation()); + // verify decision objects + assertTrue(allocateDecision.isDecisionTaken()); + assertFalse(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.ALLOCATION_DELAYED, allocateDecision.getAllocationDecision()); + assertThat(allocateDecision.getExplanation(), startsWith("cannot allocate because the cluster is still waiting")); + assertThat(allocateDecision.getExplanation(), containsString( + "despite being allowed to allocate the shard to at least one other node")); + assertNull(allocateDecision.getAllocationId()); + assertNull(allocateDecision.getTargetNode()); + assertEquals(60000L, allocateDecision.getConfiguredDelayInMillis()); + assertThat(allocateDecision.getRemainingDelayInMillis(), greaterThan(0L)); + assertEquals(2, allocateDecision.getNodeDecisions().size()); + String primaryNodeName = primaryNodeName(); + for (NodeAllocationResult result : allocateDecision.getNodeDecisions()) { + assertNotNull(result.getNode()); + boolean nodeHoldingPrimary = result.getNode().getName().equals(primaryNodeName); + if (nodeHoldingPrimary) { + // shouldn't be able to allocate to the same node as the primary, the same shard decider should say no + assertEquals(AllocationDecision.NO, result.getNodeDecision()); + assertThat(result.getShardStoreInfo().getMatchingBytes(), greaterThan(0L)); } else { - fail("unexpected node with name: " + nodeName + - ", I have: " + noAttrNode + ", " + barAttrNode + ", " + fooBarAttrNode); + assertEquals(AllocationDecision.YES, result.getNodeDecision()); + assertNull(result.getShardStoreInfo()); + } + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1)); + } else { + // if we are not including YES decisions, then the node holding the primary should have 1 NO decision, + // the other node should have zero NO decisions + assertEquals(nodeHoldingPrimary ? 
1 : 0, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + if (d.label().equals("same_shard") && nodeHoldingPrimary) { + assertEquals(Decision.Type.NO, d.type()); + assertThat(d.getExplanation(), startsWith( + "the shard cannot be allocated to the same node on which a copy of the shard already exists")); + } else { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + } + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, false, includeDiskInfo, ShardRoutingState.UNASSIGNED); + parser.nextToken(); + assertEquals("can_allocate", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.ALLOCATION_DELAYED.toString(), parser.text()); + parser.nextToken(); + assertEquals("allocate_explanation", parser.currentName()); + parser.nextToken(); + assertThat(parser.text(), startsWith("cannot allocate because the cluster is still waiting")); + parser.nextToken(); + assertEquals("configured_delay_in_millis", parser.currentName()); + parser.nextToken(); + assertEquals(60000L, parser.longValue()); + parser.nextToken(); + assertEquals("remaining_delay_in_millis", parser.currentName()); + parser.nextToken(); + assertThat(parser.longValue(), greaterThan(0L)); + Map<String, AllocationDecision> nodes = new HashMap<>(); + nodes.put(primaryNodeName, AllocationDecision.NO); + String[] currentNodes = internalCluster().getNodeNames(); + nodes.put(currentNodes[0].equals(primaryNodeName) ? currentNodes[1] : currentNodes[0], AllocationDecision.YES); + verifyNodeDecisions(parser, nodes, includeYesDecisions, true); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testUnassignedReplicaWithPriorCopy() throws Exception { + logger.info("--> starting 3 nodes"); + List<String> nodes = internalCluster().startNodes(3); + + logger.info("--> creating an index with 1 primary and 1 replica"); + createIndexAndIndexData(1, 1); + String primaryNodeName = primaryNodeName(); + nodes.remove(primaryNodeName); + + logger.info("--> shutting down all nodes except the one that holds the primary"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1))); + ensureStableCluster(1); + + logger.info("--> setting allocation filtering to only allow allocation on the currently running node"); + client().admin().indices().prepareUpdateSettings("idx").setSettings( + Settings.builder().put("index.routing.allocation.include._name", primaryNodeName)).get(); + + logger.info("--> restarting the stopped nodes"); + internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).build()); + internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).build()); + ensureStableCluster(3); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + assertBusy(() -> { + if (includeDiskInfo) { + // wait till all cluster info is ready + assertEquals(3, client().admin().cluster().prepareAllocationExplain() + .setIndex("idx").setShard(0).setPrimary(true).setIncludeDiskInfo(true).get() + .getExplanation().getClusterInfo().getNodeLeastAvailableDiskUsages().size()); + } + }); + ClusterAllocationExplanation explanation = runExplain(false, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); +
ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertFalse(isPrimary); + + // verify current node info + assertNotEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNull(currentNode); + + // verify unassigned info + assertNotNull(unassignedInfo); + assertEquals(Reason.NODE_LEFT, unassignedInfo.getReason()); + assertEquals(AllocationStatus.NO_ATTEMPT, unassignedInfo.getLastAllocationStatus()); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 3); + + // verify decision objects + assertTrue(allocateDecision.isDecisionTaken()); + assertFalse(moveDecision.isDecisionTaken()); + AllocationDecision decisionToAllocate = allocateDecision.getAllocationDecision(); + assertTrue(decisionToAllocate == AllocationDecision.AWAITING_INFO || decisionToAllocate == AllocationDecision.NO); + if (decisionToAllocate == AllocationDecision.AWAITING_INFO) { + assertEquals("cannot allocate because information about existing shard data is still being retrieved from some of the nodes", + allocateDecision.getExplanation()); + } else { + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", allocateDecision.getExplanation()); + } + assertNull(allocateDecision.getAllocationId()); + assertNull(allocateDecision.getTargetNode()); + assertEquals(0L, allocateDecision.getConfiguredDelayInMillis()); + assertEquals(0L, allocateDecision.getRemainingDelayInMillis()); + assertEquals(3, allocateDecision.getNodeDecisions().size()); + for (NodeAllocationResult result : allocateDecision.getNodeDecisions()) { + assertNotNull(result.getNode()); + boolean nodeHoldingPrimary = result.getNode().getName().equals(primaryNodeName); + assertEquals(AllocationDecision.NO, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1)); + } else { + assertEquals(1, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + if (d.label().equals("same_shard") && nodeHoldingPrimary) { + assertEquals(Decision.Type.NO, d.type()); + assertThat(d.getExplanation(), startsWith( + "the shard cannot be allocated to the same node on which a copy of the shard already exists")); + } else if (d.label().equals("filter") && nodeHoldingPrimary == false) { + assertEquals(Decision.Type.NO, d.type()); + assertEquals("node does not match index setting [index.routing.allocation.include] " + + "filters [_name:\"" + primaryNodeName + "\"]", d.getExplanation()); + } else { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + } + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, false, includeDiskInfo, ShardRoutingState.UNASSIGNED); + parser.nextToken(); + assertEquals("can_allocate", parser.currentName()); + parser.nextToken(); + String allocationDecision = parser.text(); + assertTrue(allocationDecision.equals(AllocationDecision.NO.toString()) + || 
allocationDecision.equals(AllocationDecision.AWAITING_INFO.toString())); + parser.nextToken(); + assertEquals("allocate_explanation", parser.currentName()); + parser.nextToken(); + if (allocationDecision.equals("awaiting_info")) { + assertEquals("cannot allocate because information about existing shard data is still being retrieved " + + "from some of the nodes", parser.text()); + } else { + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", parser.text()); + } + Map<String, AllocationDecision> nodeDecisions = new HashMap<>(); + for (String nodeName : internalCluster().getNodeNames()) { + nodeDecisions.put(nodeName, AllocationDecision.NO); + } + verifyNodeDecisions(parser, nodeDecisions, includeYesDecisions, true); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testAllocationFilteringOnIndexCreation() throws Exception { + logger.info("--> starting 2 nodes"); + internalCluster().startNodes(2); + + logger.info("--> creating an index with 1 primary, 0 replicas, with allocation filtering so the primary can't be assigned"); + createIndexAndIndexData(1, 0, Settings.builder().put("index.routing.allocation.include._name", "non_existent_node").build(), + ActiveShardCount.NONE); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertTrue(isPrimary); + + // verify current node info + assertNotEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNull(currentNode); + + // verify unassigned info + assertNotNull(unassignedInfo); + assertEquals(Reason.INDEX_CREATED, unassignedInfo.getReason()); + assertEquals(AllocationStatus.DECIDERS_NO, unassignedInfo.getLastAllocationStatus()); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 2); + + // verify decision objects + assertTrue(allocateDecision.isDecisionTaken()); + assertFalse(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO, allocateDecision.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", allocateDecision.getExplanation()); + assertNull(allocateDecision.getAllocationId()); + assertNull(allocateDecision.getTargetNode()); + assertEquals(0L, allocateDecision.getConfiguredDelayInMillis()); + assertEquals(0L, allocateDecision.getRemainingDelayInMillis()); + assertEquals(2, allocateDecision.getNodeDecisions().size()); + for (NodeAllocationResult result : allocateDecision.getNodeDecisions()) { + assertNotNull(result.getNode()); + assertEquals(AllocationDecision.NO, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1)); + } else { + assertEquals(1, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d
: result.getCanAllocateDecision().getDecisions()) { + if (d.label().equals("filter")) { + assertEquals(Decision.Type.NO, d.type()); + assertEquals("node does not match index setting [index.routing.allocation.include] filters " + + "[_name:\"non_existent_node\"]", d.getExplanation()); + } + } + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED); + parser.nextToken(); + assertEquals("can_allocate", parser.currentName()); + parser.nextToken(); + String allocationDecision = parser.text(); + assertTrue(allocationDecision.equals(AllocationDecision.NO.toString()) + || allocationDecision.equals(AllocationDecision.AWAITING_INFO.toString())); + parser.nextToken(); + assertEquals("allocate_explanation", parser.currentName()); + parser.nextToken(); + if (allocationDecision.equals("awaiting_info")) { + assertEquals("cannot allocate because information about existing shard data is still being retrieved " + + "from some of the nodes", parser.text()); + } else { + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", parser.text()); + } + Map<String, AllocationDecision> nodeDecisions = new HashMap<>(); + for (String nodeName : internalCluster().getNodeNames()) { + nodeDecisions.put(nodeName, AllocationDecision.NO); + } + verifyNodeDecisions(parser, nodeDecisions, includeYesDecisions, false); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testAllocationFilteringPreventsShardMove() throws Exception { + logger.info("--> starting 2 nodes"); + internalCluster().startNodes(2); + + logger.info("--> creating an index with 1 primary and 0 replicas"); + createIndexAndIndexData(1, 0); + + logger.info("--> setting up allocation filtering to prevent allocation to both nodes"); + client().admin().indices().prepareUpdateSettings("idx").setSettings( + Settings.builder().put("index.routing.allocation.include._name", "non_existent_node")).get(); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertTrue(isPrimary); + + // verify current node info + assertEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNotNull(currentNode); + + // verify unassigned info + assertNull(unassignedInfo); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 2); + + // verify decision object + assertFalse(allocateDecision.isDecisionTaken()); + assertTrue(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision()); + assertEquals("cannot move shard to another node, even though it is not allowed to remain on its current node", + moveDecision.getExplanation()); + assertFalse(moveDecision.canRemain()); +
assertFalse(moveDecision.forceMove()); + assertFalse(moveDecision.canRebalanceCluster()); + assertNull(moveDecision.getClusterRebalanceDecision()); + assertNull(moveDecision.getTargetNode()); + assertEquals(0, moveDecision.getCurrentNodeRanking()); + // verifying can remain decision object + assertNotNull(moveDecision.getCanRemainDecision()); + assertEquals(Decision.Type.NO, moveDecision.getCanRemainDecision().type()); + for (Decision d : moveDecision.getCanRemainDecision().getDecisions()) { + if (d.label().equals("filter")) { + assertEquals(Decision.Type.NO, d.type()); + assertEquals("node does not match index setting [index.routing.allocation.include] filters [_name:\"non_existent_node\"]", + d.getExplanation()); + } else { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + } + // verify node decisions + assertEquals(1, moveDecision.getNodeDecisions().size()); + NodeAllocationResult result = moveDecision.getNodeDecisions().get(0); + assertNotNull(result.getNode()); + assertEquals(1, result.getWeightRanking()); + assertEquals(AllocationDecision.NO, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1)); + } else { + assertEquals(1, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + if (d.label().equals("filter")) { + assertEquals(Decision.Type.NO, d.type()); + assertEquals("node does not match index setting [index.routing.allocation.include] filters [_name:\"non_existent_node\"]", + d.getExplanation()); + } else { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.STARTED); + parser.nextToken(); + assertEquals("can_remain_on_current_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_remain_decisions", parser.currentName()); + verifyDeciders(parser, AllocationDecision.NO); + parser.nextToken(); + assertEquals("can_move_to_other_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("move_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("cannot move shard to another node, even though it is not allowed to remain on its current node", parser.text()); + verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.NO, true), includeYesDecisions, false); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testRebalancingNotAllowed() throws Exception { + logger.info("--> starting a single node"); + internalCluster().startNode(); + ensureStableCluster(1); + + logger.info("--> creating an index with 5 shards, all allocated to the single node"); + createIndexAndIndexData(5, 0); + + logger.info("--> disabling rebalancing on the index"); + client().admin().indices().prepareUpdateSettings("idx").setSettings( + Settings.builder().put("index.routing.rebalance.enable", "none")).get(); + + logger.info("--> starting another node, with rebalancing disabled, it should get no shards"); + internalCluster().startNode(); + ensureStableCluster(2); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + ClusterAllocationExplanation 
explanation = runExplain(true, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertTrue(isPrimary); + + // verify current node info + assertEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNotNull(currentNode); + + // verify unassigned info + assertNull(unassignedInfo); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 2); + + // verify decision object + assertFalse(allocateDecision.isDecisionTaken()); + assertTrue(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision()); + assertEquals("rebalancing is not allowed, even though there is at least one node on which the shard can be allocated", + moveDecision.getExplanation()); + assertTrue(moveDecision.canRemain()); + assertFalse(moveDecision.forceMove()); + assertFalse(moveDecision.canRebalanceCluster()); + assertNotNull(moveDecision.getCanRemainDecision()); + assertNull(moveDecision.getTargetNode()); + assertEquals(2, moveDecision.getCurrentNodeRanking()); + // verifying cluster rebalance decision object + assertNotNull(moveDecision.getClusterRebalanceDecision()); + assertEquals(Decision.Type.NO, moveDecision.getClusterRebalanceDecision().type()); + for (Decision d : moveDecision.getClusterRebalanceDecision().getDecisions()) { + if (d.label().equals("enable")) { + assertEquals(Decision.Type.NO, d.type()); + assertEquals("no rebalancing is allowed due to index setting [index.routing.rebalance.enable=none]", + d.getExplanation()); + } else { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + } + // verify node decisions + assertEquals(1, moveDecision.getNodeDecisions().size()); + NodeAllocationResult result = moveDecision.getNodeDecisions().get(0); + assertNotNull(result.getNode()); + assertEquals(1, result.getWeightRanking()); + assertEquals(AllocationDecision.YES, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(0)); + } else { + assertEquals(0, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.STARTED); + parser.nextToken(); + assertEquals("can_remain_on_current_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.YES.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_cluster", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_cluster_decisions", parser.currentName()); + verifyDeciders(parser, 
AllocationDecision.NO); + parser.nextToken(); + assertEquals("can_rebalance_to_other_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("rebalance_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("rebalancing is not allowed, even though there is at least one node on which the shard can be allocated", + parser.text()); + verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.YES, true), includeYesDecisions, false); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testWorseBalance() throws Exception { + logger.info("--> starting a single node"); + internalCluster().startNode(); + ensureStableCluster(1); + + logger.info("--> creating an index with 5 shards, all allocated to the single node"); + createIndexAndIndexData(5, 0); + + logger.info("--> setting balancing threshold really high, so it won't be met"); + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().put("cluster.routing.allocation.balance.threshold", 1000.0f)).get(); + + logger.info("--> starting another node, with the rebalance threshold so high, it should not get any shards"); + internalCluster().startNode(); + ensureStableCluster(2); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertTrue(isPrimary); + + // verify current node info + assertEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNotNull(currentNode); + + // verify unassigned info + assertNull(unassignedInfo); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 2); + + // verify decision object + assertFalse(allocateDecision.isDecisionTaken()); + assertTrue(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision()); + assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", + moveDecision.getExplanation()); + assertTrue(moveDecision.canRemain()); + assertFalse(moveDecision.forceMove()); + assertTrue(moveDecision.canRebalanceCluster()); + assertNotNull(moveDecision.getCanRemainDecision()); + assertNull(moveDecision.getTargetNode()); + assertEquals(1, moveDecision.getCurrentNodeRanking()); + // verifying cluster rebalance decision object + assertNotNull(moveDecision.getClusterRebalanceDecision()); + assertEquals(Decision.Type.YES, moveDecision.getClusterRebalanceDecision().type()); + for (Decision d : moveDecision.getClusterRebalanceDecision().getDecisions()) { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + // verify node decisions + assertEquals(1, 
moveDecision.getNodeDecisions().size()); + NodeAllocationResult result = moveDecision.getNodeDecisions().get(0); + assertNotNull(result.getNode()); + assertEquals(1, result.getWeightRanking()); + assertEquals(AllocationDecision.WORSE_BALANCE, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(0)); + } else { + assertEquals(0, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.STARTED); + parser.nextToken(); + assertEquals("can_remain_on_current_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.YES.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_cluster", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.YES.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_to_other_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("rebalance_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", + parser.text()); + verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.WORSE_BALANCE, true), includeYesDecisions, false); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testBetterBalanceButCannotAllocate() throws Exception { + logger.info("--> starting a single node"); + String firstNode = internalCluster().startNode(); + ensureStableCluster(1); + + logger.info("--> creating an index with 5 shards, all allocated to the single node"); + createIndexAndIndexData(5, 0); + + logger.info("--> setting up allocation filtering to only allow allocation to the current node"); + client().admin().indices().prepareUpdateSettings("idx").setSettings( + Settings.builder().put("index.routing.allocation.include._name", firstNode)).get(); + + logger.info("--> starting another node, with filtering not allowing allocation to the new node, it should not get any shards"); + internalCluster().startNode(); + ensureStableCluster(2); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertTrue(isPrimary); + + // verify current node info + assertEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNotNull(currentNode); + + // verify unassigned info + 
assertNull(unassignedInfo); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 2); + + // verify decision object + assertFalse(allocateDecision.isDecisionTaken()); + assertTrue(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision()); + assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", + moveDecision.getExplanation()); + assertTrue(moveDecision.canRemain()); + assertFalse(moveDecision.forceMove()); + assertTrue(moveDecision.canRebalanceCluster()); + assertNotNull(moveDecision.getCanRemainDecision()); + assertNull(moveDecision.getTargetNode()); + assertEquals(2, moveDecision.getCurrentNodeRanking()); + // verifying cluster rebalance decision object + assertNotNull(moveDecision.getClusterRebalanceDecision()); + assertEquals(Decision.Type.YES, moveDecision.getClusterRebalanceDecision().type()); + for (Decision d : moveDecision.getClusterRebalanceDecision().getDecisions()) { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + // verify node decisions + assertEquals(1, moveDecision.getNodeDecisions().size()); + NodeAllocationResult result = moveDecision.getNodeDecisions().get(0); + assertNotNull(result.getNode()); + assertEquals(1, result.getWeightRanking()); + assertEquals(AllocationDecision.NO, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1)); + } else { + assertEquals(1, result.getCanAllocateDecision().getDecisions().size()); + } + String primaryNodeName = primaryNodeName(); + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + if (d.label().equals("filter")) { + assertEquals(Decision.Type.NO, d.type()); + assertEquals("node does not match index setting [index.routing.allocation.include] filters [_name:\"" + + primaryNodeName + "\"]", d.getExplanation()); + } else { + assertEquals(Decision.Type.YES, d.type()); + assertNotNull(d.getExplanation()); + } + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.STARTED); + parser.nextToken(); + assertEquals("can_remain_on_current_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.YES.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_cluster", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.YES.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_to_other_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("rebalance_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", + parser.text()); + verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.NO, true), includeYesDecisions, false); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + public void testAssignedReplicaOnSpecificNode() throws Exception { + logger.info("--> starting 3 nodes"); + List<String> nodes = internalCluster().startNodes(3); + + logger.info("--> creating an index with 1 primary and 2 replicas"); + String excludedNode = nodes.get(randomIntBetween(0, 2)); + createIndexAndIndexData(1, 2,
Settings.builder().put("index.routing.allocation.exclude._name", excludedNode).build(), + ActiveShardCount.from(2)); + + boolean includeYesDecisions = randomBoolean(); + boolean includeDiskInfo = randomBoolean(); + assertBusy(() -> { + if (includeDiskInfo) { + // wait till all cluster info is ready + assertEquals(3, client().admin().cluster().prepareAllocationExplain() + .setIndex("idx").setShard(0).setPrimary(true).setIncludeDiskInfo(true).get() + .getExplanation().getClusterInfo().getNodeLeastAvailableDiskUsages().size()); + } + }); + ClusterAllocationExplanation explanation = runExplain(false, replicaNode().getId(), includeYesDecisions, includeDiskInfo); + + ShardId shardId = explanation.getShard(); + boolean isPrimary = explanation.isPrimary(); + ShardRoutingState shardRoutingState = explanation.getShardState(); + DiscoveryNode currentNode = explanation.getCurrentNode(); + UnassignedInfo unassignedInfo = explanation.getUnassignedInfo(); + ClusterInfo clusterInfo = explanation.getClusterInfo(); + AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision(); + MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision(); + + // verify shard info + assertEquals("idx", shardId.getIndexName()); + assertEquals(0, shardId.getId()); + assertFalse(isPrimary); + + // verify current node info + assertEquals(ShardRoutingState.STARTED, shardRoutingState); + assertNotNull(currentNode); + assertEquals(replicaNode().getName(), currentNode.getName()); + + // verify unassigned info + assertNull(unassignedInfo); + + // verify cluster info + verifyClusterInfo(clusterInfo, includeDiskInfo, 3); + + // verify decision objects + assertFalse(allocateDecision.isDecisionTaken()); + assertTrue(moveDecision.isDecisionTaken()); + assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision()); + assertEquals("rebalancing is not allowed", moveDecision.getExplanation()); + assertTrue(moveDecision.canRemain()); + assertFalse(moveDecision.forceMove()); + assertFalse(moveDecision.canRebalanceCluster()); + assertNotNull(moveDecision.getCanRemainDecision()); + assertNull(moveDecision.getTargetNode()); + // verifying cluster rebalance decision object + assertNotNull(moveDecision.getClusterRebalanceDecision()); + assertEquals(Decision.Type.NO, moveDecision.getClusterRebalanceDecision().type()); + // verify node decisions + assertEquals(2, moveDecision.getNodeDecisions().size()); + for (NodeAllocationResult result : moveDecision.getNodeDecisions()) { + assertNotNull(result.getNode()); + assertEquals(1, result.getWeightRanking()); + assertEquals(AllocationDecision.NO, result.getNodeDecision()); + if (includeYesDecisions) { + assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1)); + } else { + assertEquals(1, result.getCanAllocateDecision().getDecisions().size()); + } + for (Decision d : result.getCanAllocateDecision().getDecisions()) { + if (d.type() == Decision.Type.NO) { + assertThat(d.label(), isOneOf("filter", "same_shard")); + } + assertNotNull(d.getExplanation()); + } + } + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, false, includeDiskInfo, ShardRoutingState.STARTED); + parser.nextToken(); + assertEquals("can_remain_on_current_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.YES.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_cluster", parser.currentName()); + parser.nextToken(); + 
assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("can_rebalance_cluster_decisions", parser.currentName()); + verifyDeciders(parser, AllocationDecision.NO); + parser.nextToken(); + assertEquals("can_rebalance_to_other_node", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO.toString(), parser.text()); + parser.nextToken(); + assertEquals("rebalance_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("rebalancing is not allowed", parser.text()); + verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.NO, false), includeYesDecisions, false); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + } + + private void verifyClusterInfo(ClusterInfo clusterInfo, boolean includeDiskInfo, int numNodes) { + if (includeDiskInfo) { + assertThat(clusterInfo.getNodeMostAvailableDiskUsages().size(), greaterThanOrEqualTo(0)); + assertThat(clusterInfo.getNodeLeastAvailableDiskUsages().size(), greaterThanOrEqualTo(0)); + assertThat(clusterInfo.getNodeMostAvailableDiskUsages().size(), lessThanOrEqualTo(numNodes)); + assertThat(clusterInfo.getNodeLeastAvailableDiskUsages().size(), lessThanOrEqualTo(numNodes)); + } else { + assertNull(clusterInfo); + } + } + + private ClusterAllocationExplanation runExplain(boolean primary, boolean includeYesDecisions, boolean includeDiskInfo) + throws Exception { + + return runExplain(primary, null, includeYesDecisions, includeDiskInfo); + } + + private ClusterAllocationExplanation runExplain(boolean primary, String nodeId, boolean includeYesDecisions, boolean includeDiskInfo) + throws Exception { + + ClusterAllocationExplanation explanation = client().admin().cluster().prepareAllocationExplain() + .setIndex("idx").setShard(0).setPrimary(primary) + .setIncludeYesDecisions(includeYesDecisions) + .setIncludeDiskInfo(includeDiskInfo) + .setCurrentNode(nodeId) + .get().getExplanation(); + if (logger.isDebugEnabled()) { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.prettyPrint(); + builder.humanReadable(true); + logger.debug("--> explain json output: \n{}", explanation.toXContent(builder, ToXContent.EMPTY_PARAMS).string()); + } + return explanation; + } + + private void createIndexAndIndexData(int numPrimaries, int numReplicas) { + createIndexAndIndexData(numPrimaries, numReplicas, Settings.EMPTY, ActiveShardCount.ALL); + } + + private void createIndexAndIndexData(int numPrimaries, int numReplicas, Settings settings, ActiveShardCount activeShardCount) { + client().admin().indices().prepareCreate("idx") + .setSettings(Settings.builder() + .put("index.number_of_shards", numPrimaries) + .put("index.number_of_replicas", numReplicas) + .put(settings)) + .setWaitForActiveShards(activeShardCount) + .get(); + if (activeShardCount != ActiveShardCount.NONE) { + for (int i = 0; i < 10; i++) { + index("idx", "t", Integer.toString(i), Collections.singletonMap("f1", Integer.toString(i))); + } + flushAndRefresh("idx"); + } + } + + private String primaryNodeName() { + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String nodeId = clusterState.getRoutingTable().index("idx").shard(0).primaryShard().currentNodeId(); + return clusterState.getRoutingNodes().node(nodeId).node().getName(); + } + + private DiscoveryNode replicaNode() { + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String nodeId = 
clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0).currentNodeId(); + return clusterState.getRoutingNodes().node(nodeId).node(); + } + + private XContentParser getParser(ClusterAllocationExplanation explanation) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder(); + return createParser(explanation.toXContent(builder, ToXContent.EMPTY_PARAMS)); + } + + private void verifyShardInfo(XContentParser parser, boolean primary, boolean includeDiskInfo, ShardRoutingState state) + throws IOException { + + parser.nextToken(); + assertEquals(Token.START_OBJECT, parser.currentToken()); + parser.nextToken(); + assertEquals("index", parser.currentName()); + parser.nextToken(); + assertEquals("idx", parser.text()); + parser.nextToken(); + assertEquals("shard", parser.currentName()); + parser.nextToken(); + assertEquals(0, parser.intValue()); + parser.nextToken(); + assertEquals("primary", parser.currentName()); + parser.nextToken(); + assertEquals(primary, parser.booleanValue()); + parser.nextToken(); + assertEquals("current_state", parser.currentName()); + parser.nextToken(); + assertEquals(state.toString().toLowerCase(Locale.ROOT), parser.text()); + if (state == ShardRoutingState.UNASSIGNED) { + parser.nextToken(); + assertEquals("unassigned_info", parser.currentName()); + assertEquals(Token.START_OBJECT, parser.nextToken()); + Token token; + while ((token = parser.nextToken()) != Token.END_OBJECT) { // until we reach end of unassigned_info + if (token == XContentParser.Token.FIELD_NAME) { + assertNotEquals("delayed", parser.currentName()); // we should never display "delayed" from unassigned info + if (parser.currentName().equals("last_allocation_status")) { + parser.nextToken(); + assertThat(parser.text(), isOneOf(AllocationDecision.NO.toString(), + AllocationDecision.NO_VALID_SHARD_COPY.toString(), + AllocationDecision.AWAITING_INFO.toString(), + AllocationDecision.NO_ATTEMPT.toString())); + } + } + } + } else { + assertEquals(ShardRoutingState.STARTED, state); + parser.nextToken(); + assertEquals("current_node", parser.currentName()); + assertEquals(Token.START_OBJECT, parser.nextToken()); + Token token; + while ((token = parser.nextToken()) != Token.END_OBJECT) { // until we reach end of current_node + if (token == Token.FIELD_NAME) { + assertTrue(parser.currentName().equals("id") + || parser.currentName().equals("name") + || parser.currentName().equals("transport_address") + || parser.currentName().equals("weight_ranking")); + } else { + assertTrue(token.isValue()); + assertNotNull(parser.text()); + } + } + } + if (includeDiskInfo) { + // disk info is included, just verify the object is there + parser.nextToken(); + assertEquals("cluster_info", parser.currentName()); + assertEquals(Token.START_OBJECT, parser.nextToken()); + int numObjects = 1; + while (numObjects > 0) { + Token token = parser.nextToken(); + if (token == Token.START_OBJECT) { + ++numObjects; + } else if (token == Token.END_OBJECT) { + --numObjects; + } + } + } + } + + private void verifyNodeDecisions(XContentParser parser, Map expectedNodeDecisions, + boolean includeYesDecisions, boolean reuseStore) throws IOException { + parser.nextToken(); + assertEquals("node_allocation_decisions", parser.currentName()); + assertEquals(Token.START_ARRAY, parser.nextToken()); + boolean encounteredNo = false; + final int numNodes = expectedNodeDecisions.size(); + for (int i = 0; i < numNodes; i++) { + assertEquals(Token.START_OBJECT, parser.nextToken()); + parser.nextToken(); + 
assertEquals("node_id", parser.currentName()); + parser.nextToken(); + assertNotNull(parser.text()); + parser.nextToken(); + assertEquals("node_name", parser.currentName()); + parser.nextToken(); + String nodeName = parser.text(); + AllocationDecision allocationDecision = expectedNodeDecisions.get(nodeName); + assertNotNull(nodeName); + parser.nextToken(); + assertEquals("transport_address", parser.currentName()); + parser.nextToken(); + assertNotNull(parser.text()); + parser.nextToken(); + assertEquals("node_decision", parser.currentName()); + parser.nextToken(); + assertEquals(allocationDecision.toString(), parser.text()); + if (allocationDecision != AllocationDecision.YES) { + encounteredNo = true; + } else { + assertFalse("encountered a YES node decision after a NO node decision - sort order is wrong", encounteredNo); + } + parser.nextToken(); + if ("store".equals(parser.currentName())) { + assertTrue("store info should not be present", reuseStore); + assertEquals(Token.START_OBJECT, parser.nextToken()); + parser.nextToken(); + assertEquals("matching_size_in_bytes", parser.currentName()); + parser.nextToken(); + assertThat(parser.longValue(), greaterThan(0L)); + assertEquals(Token.END_OBJECT, parser.nextToken()); + parser.nextToken(); + } + if (reuseStore == false) { + assertEquals("weight_ranking", parser.currentName()); + parser.nextToken(); + assertThat(parser.intValue(), greaterThan(0)); + parser.nextToken(); + } + if (allocationDecision == AllocationDecision.NO || allocationDecision == AllocationDecision.THROTTLED || includeYesDecisions) { + assertEquals("deciders", parser.currentName()); + boolean atLeastOneMatchingDecisionFound = verifyDeciders(parser, allocationDecision); + parser.nextToken(); + if (allocationDecision == AllocationDecision.NO || allocationDecision == AllocationDecision.THROTTLED) { + assertTrue("decision was " + allocationDecision + " but found no node's with that decision", + atLeastOneMatchingDecisionFound); + } + } + assertEquals(Token.END_OBJECT, parser.currentToken()); + } + assertEquals(Token.END_ARRAY, parser.nextToken()); + } + + private boolean verifyDeciders(XContentParser parser, AllocationDecision allocationDecision) throws IOException { + assertEquals(Token.START_ARRAY, parser.nextToken()); + boolean atLeastOneMatchingDecisionFound = false; + while (parser.nextToken() != Token.END_ARRAY) { + assertEquals(Token.START_OBJECT, parser.currentToken()); + parser.nextToken(); + assertEquals("decider", parser.currentName()); + parser.nextToken(); + assertNotNull(parser.text()); + parser.nextToken(); + assertEquals("decision", parser.currentName()); + parser.nextToken(); + String decisionText = parser.text(); + if ((allocationDecision == AllocationDecision.NO && decisionText.equals("NO") + || (allocationDecision == AllocationDecision.THROTTLED && decisionText.equals("THROTTLE")))) { + atLeastOneMatchingDecisionFound = true; + } + assertNotNull(decisionText); + parser.nextToken(); + assertEquals("explanation", parser.currentName()); + parser.nextToken(); + assertNotNull(parser.text()); + assertEquals(Token.END_OBJECT, parser.nextToken()); + } + return atLeastOneMatchingDecisionFound; + } + + private Map allNodeDecisions(AllocationDecision allocationDecision, boolean removePrimary) { + Map nodeDecisions = new HashMap<>(); + Set allNodes = Sets.newHashSet(internalCluster().getNodeNames()); + allNodes.remove(removePrimary ? 
primaryNodeName() : replicaNode().getName()); + for (String nodeName : allNodes) { + nodeDecisions.put(nodeName, allocationDecision); + } + return nodeDecisions; + } + } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestTests.java index 926de8b253e..4067eb1b1ed 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestTests.java @@ -26,7 +26,8 @@ public class ClusterAllocationExplainRequestTests extends ESTestCase { public void testSerialization() throws Exception { ClusterAllocationExplainRequest request = - new ClusterAllocationExplainRequest(randomAsciiOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), randomBoolean()); + new ClusterAllocationExplainRequest(randomAsciiOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), randomBoolean(), + randomBoolean() ? randomAsciiOfLength(5) : null); request.includeYesDecisions(randomBoolean()); request.includeDiskInfo(randomBoolean()); BytesStreamOutput output = new BytesStreamOutput(); @@ -39,6 +40,7 @@ public class ClusterAllocationExplainRequestTests extends ESTestCase { assertEquals(request.isPrimary(), actual.isPrimary()); assertEquals(request.includeYesDecisions(), actual.includeYesDecisions()); assertEquals(request.includeDiskInfo(), actual.includeDiskInfo()); + assertEquals(request.getCurrentNode(), actual.getCurrentNode()); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java deleted file mode 100644 index 329cc3805ab..00000000000 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-package org.elasticsearch.action.admin.cluster.allocation;
-
-import org.elasticsearch.cluster.routing.allocation.decider.Decision;
-import org.elasticsearch.test.ESSingleNodeTestCase;
-
-
-/**
- * Tests for the cluster allocation explanation
- */
-public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {
-
-    public void testShardExplain() throws Exception {
-        client().admin().indices().prepareCreate("test")
-            .setSettings("index.number_of_shards", 1, "index.number_of_replicas", 1).get();
-        ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain()
-            .setIndex("test").setShard(0).setPrimary(false).get();
-
-        ClusterAllocationExplanation cae = resp.getExplanation();
-        assertNotNull("should always have an explanation", cae);
-        assertEquals("test", cae.getShard().getIndexName());
-        assertEquals(0, cae.getShard().getId());
-        assertEquals(false, cae.isPrimary());
-        assertNull(cae.getAssignedNodeId());
-        assertFalse(cae.isStillFetchingShardData());
-        assertNotNull(cae.getUnassignedInfo());
-        NodeExplanation explanation = cae.getNodeExplanations().values().iterator().next();
-        ClusterAllocationExplanation.FinalDecision fd = explanation.getFinalDecision();
-        ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy();
-        Decision d = explanation.getDecision();
-        assertNotNull("should have a decision", d);
-        assertEquals(Decision.Type.NO, d.type());
-        assertEquals(ClusterAllocationExplanation.FinalDecision.NO, fd);
-        assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
-        assertTrue(d.toString(), d.toString().contains("NO(the shard cannot be allocated to the same node"));
-        assertTrue(d instanceof Decision.Multi);
-        Decision.Multi md = (Decision.Multi) d;
-        Decision ssd = md.getDecisions().get(0);
-        assertEquals(Decision.Type.NO, ssd.type());
-        assertTrue(ssd.toString(), ssd.toString().contains("NO(the shard cannot be allocated to the same node"));
-        Float weight = explanation.getWeight();
-        assertNotNull("should have a weight", weight);
-
-        resp = client().admin().cluster().prepareAllocationExplain().setIndex("test").setShard(0).setPrimary(true).get();
-
-        cae = resp.getExplanation();
-        assertNotNull("should always have an explanation", cae);
-        assertEquals("test", cae.getShard().getIndexName());
-        assertEquals(0, cae.getShard().getId());
-        assertEquals(true, cae.isPrimary());
-        assertFalse(cae.isStillFetchingShardData());
-        assertNotNull("shard should have assigned node id", cae.getAssignedNodeId());
-        assertNull("assigned shard should not have unassigned info", cae.getUnassignedInfo());
-        explanation = cae.getNodeExplanations().values().iterator().next();
-        d = explanation.getDecision();
-        fd = explanation.getFinalDecision();
-        storeCopy = explanation.getStoreCopy();
-        assertNotNull("should have a decision", d);
-        assertEquals(Decision.Type.NO, d.type());
-        assertEquals(ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, fd);
-        assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
-        assertTrue(d.toString(), d.toString().contains(
-            "NO(the shard cannot be allocated to the node on which it already exists [[test][0]"));
-        assertTrue(d instanceof Decision.Multi);
-        md = (Decision.Multi) d;
-        ssd = md.getDecisions().get(0);
-        assertEquals(Decision.Type.NO, ssd.type());
-        assertTrue(ssd.toString(), ssd.toString().contains(
-            "NO(the shard cannot be allocated to the node on which it already exists [[test][0]"));
-        weight = explanation.getWeight();
-        assertNotNull("should have a weight", weight);
-
-        resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get();
-        cae = resp.getExplanation();
-        assertNotNull("should always have an explanation", cae);
-        assertEquals("test", cae.getShard().getIndexName());
-        assertEquals(0, cae.getShard().getId());
-        assertEquals(false, cae.isPrimary());
-    }
-}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
index d656702f9cd..bce9afd1c1f 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
@@ -19,21 +19,19 @@
 
 package org.elasticsearch.action.admin.cluster.allocation;
 
-import org.apache.lucene.index.CorruptIndexException;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
-import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
+import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
+import org.elasticsearch.cluster.routing.allocation.MoveDecision;
+import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -41,12 +39,6 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 
@@ -55,123 +47,6 @@ import static java.util.Collections.emptySet;
  */
 public final class ClusterAllocationExplanationTests extends ESTestCase {
 
-    private Index i = new Index("foo", "uuid");
-    private ShardRouting primaryShard = ShardRouting.newUnassigned(new ShardId(i, 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE,
-        new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
-    private IndexMetaData indexMetaData = IndexMetaData.builder("foo")
-        .settings(Settings.builder()
-            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
-            .put(IndexMetaData.SETTING_INDEX_UUID, "uuid"))
-        .putInSyncAllocationIds(0, Sets.newHashSet("aid1", "aid2"))
-        .numberOfShards(1)
-        .numberOfReplicas(1)
-        .build();
-    private DiscoveryNode node = new DiscoveryNode("node-0", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-    private static Decision.Multi yesDecision = new Decision.Multi();
-    private static Decision.Multi noDecision = new Decision.Multi();
-
-    static {
-        yesDecision.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
-        noDecision.add(Decision.single(Decision.Type.NO, "no label", "no thanks"));
-    }
-
-    private void assertExplanations(NodeExplanation ne, String finalExplanation, ClusterAllocationExplanation.FinalDecision finalDecision,
-                                    ClusterAllocationExplanation.StoreCopy storeCopy) {
-        assertEquals(finalExplanation, ne.getFinalExplanation());
-        assertEquals(finalDecision, ne.getFinalDecision());
-        assertEquals(storeCopy, ne.getStoreCopy());
-    }
-
-    public void testDecisionAndExplanation() {
-        Exception e = new IOException("stuff's broke, yo");
-        Exception corruptE = new CorruptIndexException("stuff's corrupt, yo", "");
-        Float nodeWeight = randomFloat();
-        Set<String> activeAllocationIds = new HashSet<>();
-        activeAllocationIds.add("eggplant");
-        ShardRouting primaryStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), true, StoreRecoverySource.EXISTING_STORE_INSTANCE,
-            new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
-        ShardRouting replicaStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), false, PeerRecoverySource.INSTANCE,
-            new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
-
-        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, e);
-        NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node,
-            yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
-        assertExplanations(ne, "the copy of the shard cannot be read",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.IO_ERROR);
-
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
-            null, "", activeAllocationIds, false);
-        assertExplanations(ne, "the shard can be assigned",
-            ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.NONE);
-
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
-            nodeWeight, null, "", activeAllocationIds, false);
-        assertExplanations(ne, "there is no copy of the shard available",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);
-
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, noDecision, nodeWeight,
-            null, "", activeAllocationIds, false);
-        assertExplanations(ne, "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, noDecision, nodeWeight,
-            storeStatus, "", activeAllocationIds, false);
-        assertExplanations(ne, "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, corruptE);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
-            nodeWeight, storeStatus, "", activeAllocationIds, false);
-        assertExplanations(ne, "the copy of the shard is corrupt",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.CORRUPT);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
-            storeStatus, "", activeAllocationIds, false);
-        assertExplanations(ne, "the shard can be assigned",
-            ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.STALE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
-            nodeWeight, storeStatus, "", activeAllocationIds, false);
-        assertExplanations(ne, "the copy of the shard is stale, allocation ids do not match",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.STALE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
-            storeStatus, "node-0", activeAllocationIds, false);
-        assertExplanations(ne, "the shard is already assigned to this node",
-            ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
-            storeStatus, "", activeAllocationIds, false);
-        assertExplanations(ne, "the shard can be assigned and the node contains a valid copy of the shard data",
-            ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
-            nodeWeight, storeStatus, "", activeAllocationIds, true);
-        assertExplanations(ne, "the shard's state is still being fetched so it cannot be allocated",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
-
-        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null);
-        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(replicaStartedShard, indexMetaData, node, noDecision,
-            nodeWeight, storeStatus, "", activeAllocationIds, true);
-        assertExplanations(ne, "the shard cannot be assigned because allocation deciders return a NO decision",
-            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
-    }
-
     public void testDecisionEquality() {
         Decision.Multi d = new Decision.Multi();
         Decision.Multi d2 = new Decision.Multi();
@@ -185,67 +60,53 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
     }
 
     public void testExplanationSerialization() throws Exception {
-        ShardId shard = new ShardId("test", "uuid", 0);
-        long allocationDelay = randomIntBetween(0, 500);
-        long remainingDelay = randomIntBetween(0, 500);
-        Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
-        Float nodeWeight = randomFloat();
-        Set<String> activeAllocationIds = new HashSet<>();
-        activeAllocationIds.add("eggplant");
-
-        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
-        NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
-            yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
-        nodeExplanations.put(ne.getNode(), ne);
-        ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true,
-            "assignedNode", allocationDelay, remainingDelay, null, false, nodeExplanations, null);
+        ClusterAllocationExplanation cae = randomClusterAllocationExplanation(randomBoolean());
         BytesStreamOutput out = new BytesStreamOutput();
         cae.writeTo(out);
         StreamInput in = out.bytes().streamInput();
         ClusterAllocationExplanation cae2 = new ClusterAllocationExplanation(in);
-        assertEquals(shard, cae2.getShard());
+        assertEquals(cae.getShard(), cae2.getShard());
+        assertEquals(cae.isPrimary(), cae2.isPrimary());
         assertTrue(cae2.isPrimary());
-        assertTrue(cae2.isAssigned());
-        assertEquals("assignedNode", cae2.getAssignedNodeId());
-        assertNull(cae2.getUnassignedInfo());
-        assertEquals(allocationDelay, cae2.getAllocationDelayMillis());
-        assertEquals(remainingDelay, cae2.getRemainingDelayMillis());
-        for (Map.Entry<DiscoveryNode, NodeExplanation> entry : cae2.getNodeExplanations().entrySet()) {
-            NodeExplanation explanation = entry.getValue();
-            assertNotNull(explanation.getStoreStatus());
-            assertNotNull(explanation.getDecision());
-            assertEquals(nodeWeight, explanation.getWeight());
+        assertEquals(cae.getUnassignedInfo(), cae2.getUnassignedInfo());
+        assertEquals(cae.getCurrentNode(), cae2.getCurrentNode());
+        assertEquals(cae.getShardState(), cae2.getShardState());
+        if (cae.getClusterInfo() == null) {
+            assertNull(cae2.getClusterInfo());
+        } else {
+            assertNotNull(cae2.getClusterInfo());
+            assertEquals(cae.getClusterInfo().getNodeMostAvailableDiskUsages().size(),
+                cae2.getClusterInfo().getNodeMostAvailableDiskUsages().size());
         }
+        assertEquals(cae.getShardAllocationDecision().getAllocateDecision(), cae2.getShardAllocationDecision().getAllocateDecision());
+        assertEquals(cae.getShardAllocationDecision().getMoveDecision(), cae2.getShardAllocationDecision().getMoveDecision());
     }
 
     public void testExplanationToXContent() throws Exception {
-        ShardId shardId = new ShardId("foo", "uuid", 0);
-        Decision.Multi d = new Decision.Multi();
-        d.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
-        d.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
-        d.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
-        Float nodeWeight = 1.5f;
-        Set<String> allocationIds = new HashSet<>();
-        allocationIds.add("bar");
-        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
-            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, new ElasticsearchException("stuff's broke, yo"));
-        NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
-            d, nodeWeight, storeStatus, "node-0", allocationIds, false);
-        Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
-        nodeExplanations.put(ne.getNode(), ne);
-        ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shardId, true,
-            "assignedNode", 42, 42, null, false, nodeExplanations, null);
+        ClusterAllocationExplanation cae = randomClusterAllocationExplanation(true);
         XContentBuilder builder = XContentFactory.jsonBuilder();
         cae.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        assertEquals("{\"shard\":{\"index\":\"foo\",\"index_uuid\":\"uuid\",\"id\":0,\"primary\":true},\"assigned\":true," +
-            "\"assigned_node_id\":\"assignedNode\",\"shard_state_fetch_pending\":false,\"nodes\":{\"node-0\":" +
-            "{\"node_name\":\"\",\"node_attributes\":{},\"store\":{\"shard_copy\":\"IO_ERROR\",\"store_except" +
-            "ion\":\"ElasticsearchException[stuff's broke, yo]\"},\"final_decision\":\"ALREADY_ASSIGNED\",\"f" +
-            "inal_explanation\":\"the shard is already assigned to this node\",\"weight\":1.5,\"decisions\":[" +
-            "{\"decider\":\"no label\",\"decision\":\"NO\",\"explanation\":\"because I said no\"},{\"decider" +
-            "\":\"yes label\",\"decision\":\"YES\",\"explanation\":\"yes please\"},{\"decider\":\"throttle la" +
-            "bel\",\"decision\":\"THROTTLE\",\"explanation\":\"wait a sec\"}]}}}",
-            builder.string());
+        assertEquals("{\"index\":\"idx\",\"shard\":0,\"primary\":true,\"current_state\":\"started\",\"current_node\":" +
+            "{\"id\":\"node-0\",\"name\":\"\",\"transport_address\":\"" + cae.getCurrentNode().getAddress() +
+            "\",\"weight_ranking\":3},\"can_remain_on_current_node\":\"yes\",\"can_rebalance_cluster\":\"yes\"," +
+            "\"can_rebalance_to_other_node\":\"no\",\"rebalance_explanation\":\"cannot rebalance as no target node exists " +
+            "that can both allocate this shard and improve the cluster balance\"}", builder.string());
+    }
+
+    private static ClusterAllocationExplanation randomClusterAllocationExplanation(boolean assignedShard) {
+        ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(new Index("idx", "123"), 0),
+            assignedShard ? "node-0" : null, true, assignedShard ? ShardRoutingState.STARTED : ShardRoutingState.UNASSIGNED);
+        DiscoveryNode node = assignedShard ? new DiscoveryNode("node-0", buildNewFakeTransportAddress(), emptyMap(), emptySet(),
+            Version.CURRENT) : null;
+        ShardAllocationDecision shardAllocationDecision;
+        if (assignedShard) {
+            MoveDecision moveDecision = MoveDecision.cannotRebalance(Decision.YES, AllocationDecision.NO, 3, null)
+                .withRemainDecision(Decision.YES);
+            shardAllocationDecision = new ShardAllocationDecision(AllocateUnassignedDecision.NOT_TAKEN, moveDecision);
+        } else {
+            AllocateUnassignedDecision allocateDecision = AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null);
+            shardAllocationDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
+        }
+        return new ClusterAllocationExplanation(shardRouting, node, null, null, shardAllocationDecision);
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
index 3811e37389c..aa58b57f85b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.cluster;
 
-import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
 import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
@@ -51,7 +51,6 @@ import org.elasticsearch.plugins.ClusterPlugin;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -71,10 +70,9 @@ public class ClusterModuleTests extends ModuleTestCase {
         public void allocate(RoutingAllocation allocation) {
             // noop
         }
-
        @Override
-        public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
-            return new HashMap<>();
+        public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
+            throw new UnsupportedOperationException("explain API not supported on FakeShardsAllocator");
         }
     }
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java
index 79f8f4f03bf..0f194a0f912 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java
@@ -71,7 +71,7 @@ public class AllocateUnassignedDecisionTests extends ESTestCase {
             assertThat(noDecision.getExplanation(), startsWith("cannot allocate because the cluster is still waiting"));
         } else {
             assertThat(noDecision.getExplanation(),
-                startsWith("cannot allocate because a previous copy of the shard existed"));
+                startsWith("cannot allocate because a previous copy of the primary shard existed"));
         }
         assertNull(noDecision.getNodeDecisions());
         assertNull(noDecision.getTargetNode());
@@ -107,7 +107,7 @@ public class AllocateUnassignedDecisionTests extends ESTestCase {
         nodeDecisions.add(new NodeAllocationResult(node2, Decision.THROTTLE, 2));
         AllocateUnassignedDecision throttleDecision = AllocateUnassignedDecision.throttle(nodeDecisions);
         assertTrue(throttleDecision.isDecisionTaken());
-        assertEquals(AllocationDecision.THROTTLE, throttleDecision.getAllocationDecision());
+        assertEquals(AllocationDecision.THROTTLED, throttleDecision.getAllocationDecision());
         assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus());
         assertThat(throttleDecision.getExplanation(), startsWith("allocation temporarily throttled"));
         assertEquals(nodeDecisions.stream().sorted().collect(Collectors.toList()), throttleDecision.getNodeDecisions());
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationDecisionTests.java
index 7f3c3466cd4..0c76338cefc 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationDecisionTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationDecisionTests.java
@@ -48,21 +48,21 @@ public class AllocationDecisionTests extends ESTestCase {
      */
    public void testValuesOrder() {
         assertEquals(0, AllocationDecision.YES.ordinal());
-        assertEquals(1, AllocationDecision.THROTTLE.ordinal());
+        assertEquals(1, AllocationDecision.THROTTLED.ordinal());
         assertEquals(2, AllocationDecision.NO.ordinal());
         assertEquals(3, AllocationDecision.WORSE_BALANCE.ordinal());
-        assertEquals(4, AllocationDecision.FETCH_PENDING.ordinal());
-        assertEquals(5, AllocationDecision.DELAYED_ALLOCATION.ordinal());
+        assertEquals(4, AllocationDecision.AWAITING_INFO.ordinal());
+        assertEquals(5, AllocationDecision.ALLOCATION_DELAYED.ordinal());
         assertEquals(6, AllocationDecision.NO_VALID_SHARD_COPY.ordinal());
         assertEquals(7, AllocationDecision.NO_ATTEMPT.ordinal());
 
         AllocationDecision[] decisions = AllocationDecision.values();
         Arrays.sort(decisions);
         assertEquals(AllocationDecision.YES, decisions[0]);
-        assertEquals(AllocationDecision.THROTTLE, decisions[1]);
+        assertEquals(AllocationDecision.THROTTLED, decisions[1]);
         assertEquals(AllocationDecision.NO, decisions[2]);
         assertEquals(AllocationDecision.WORSE_BALANCE, decisions[3]);
-        assertEquals(AllocationDecision.FETCH_PENDING, decisions[4]);
-        assertEquals(AllocationDecision.DELAYED_ALLOCATION, decisions[5]);
+        assertEquals(AllocationDecision.AWAITING_INFO, decisions[4]);
+        assertEquals(AllocationDecision.ALLOCATION_DELAYED, decisions[5]);
         assertEquals(AllocationDecision.NO_VALID_SHARD_COPY, decisions[6]);
         assertEquals(AllocationDecision.NO_ATTEMPT, decisions[7]);
     }
@@ -74,7 +74,7 @@ public class AllocationDecisionTests extends ESTestCase {
         Type type = randomFrom(Type.values());
         AllocationDecision allocationDecision = AllocationDecision.fromDecisionType(type);
         AllocationDecision expected = type == Type.NO ? AllocationDecision.NO :
-            type == Type.THROTTLE ? AllocationDecision.THROTTLE : AllocationDecision.YES;
+            type == Type.THROTTLE ? AllocationDecision.THROTTLED : AllocationDecision.YES;
         assertEquals(expected, allocationDecision);
     }
 
@@ -88,11 +88,11 @@ public class AllocationDecisionTests extends ESTestCase {
         if (allocationStatus == null) {
             expected = AllocationDecision.YES;
         } else if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) {
-            expected = AllocationDecision.THROTTLE;
+            expected = AllocationDecision.THROTTLED;
         } else if (allocationStatus == AllocationStatus.FETCHING_SHARD_DATA) {
-            expected = AllocationDecision.FETCH_PENDING;
+            expected = AllocationDecision.AWAITING_INFO;
         } else if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) {
-            expected = AllocationDecision.DELAYED_ALLOCATION;
+            expected = AllocationDecision.ALLOCATION_DELAYED;
         } else if (allocationStatus == AllocationStatus.NO_VALID_SHARD_COPY) {
             expected = AllocationDecision.NO_VALID_SHARD_COPY;
         } else if (allocationStatus == AllocationStatus.NO_ATTEMPT) {
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
index e2a360b93fe..ef5b848d63e 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -45,9 +45,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.test.gateway.TestGatewayAllocator;
 import org.hamcrest.Matchers;
 
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
 import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@@ -257,10 +255,6 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
         AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
             new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()),
             new TestGatewayAllocator(), new ShardsAllocator() {
-
-            public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
-                return new HashMap<DiscoveryNode, Float>();
-            }
             /*
              *  // this allocator tries to rebuild this scenario where a rebalance is
              *  // triggered solely by the primary overload on node [1] where a shard
@@ -327,6 +321,11 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
                     }
                 }
             }
+
+            @Override
+            public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
+                throw new UnsupportedOperationException("explain not supported");
+            }
         }, EmptyClusterInfoService.INSTANCE);
         MetaData.Builder metaDataBuilder = MetaData.builder();
         RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
index b3089e57db6..cb69a5de87f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
@@ -60,8 +60,8 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(),
             randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.UNASSIGNED, ShardRoutingState.RELOCATING));
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
-        MoveDecision rebalanceDecision = allocator.decideRebalance(shard, newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState));
+        MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, newRoutingAllocation(
+            new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState)).getMoveDecision();
         assertSame(MoveDecision.NOT_TAKEN, rebalanceDecision);
     }
 
@@ -72,9 +72,9 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         RoutingAllocation routingAllocation = newRoutingAllocation(
             new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState);
         routingAllocation.setHasPendingAsyncFetch();
-        MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
-        assertNotNull(rebalanceDecision.getCanRebalanceDecision());
-        assertEquals(AllocationDecision.FETCH_PENDING, rebalanceDecision.getAllocationDecision());
+        MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
+        assertNotNull(rebalanceDecision.getClusterRebalanceDecision());
+        assertEquals(AllocationDecision.AWAITING_INFO, rebalanceDecision.getAllocationDecision());
         assertThat(rebalanceDecision.getExplanation(),
             startsWith("cannot rebalance as information about existing copies of this shard in the cluster is still being gathered"));
         assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
@@ -96,15 +96,15 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
         RoutingAllocation routingAllocation = newRoutingAllocation(
             new AllocationDeciders(Settings.EMPTY, Collections.singleton(noRebalanceDecider)), clusterState);
-        MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
-        assertEquals(canRebalanceDecision.type(), rebalanceDecision.getCanRebalanceDecision().type());
+        MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
+        assertEquals(canRebalanceDecision.type(), rebalanceDecision.getClusterRebalanceDecision().type());
         assertEquals(AllocationDecision.fromDecisionType(canRebalanceDecision.type()), rebalanceDecision.getAllocationDecision());
         assertThat(rebalanceDecision.getExplanation(), containsString(canRebalanceDecision.type() == Type.THROTTLE ?
             "rebalancing is throttled" : "rebalancing is not allowed"));
         assertNotNull(rebalanceDecision.getNodeDecisions());
         assertNull(rebalanceDecision.getTargetNode());
-        assertEquals(1, rebalanceDecision.getCanRebalanceDecision().getDecisions().size());
-        for (Decision subDecision : rebalanceDecision.getCanRebalanceDecision().getDecisions()) {
+        assertEquals(1, rebalanceDecision.getClusterRebalanceDecision().getDecisions().size());
+        for (Decision subDecision : rebalanceDecision.getClusterRebalanceDecision().getDecisions()) {
             assertEquals("foobar", ((Decision.Single) subDecision).getExplanation());
         }
 
@@ -121,7 +121,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         Tuple<ClusterState, MoveDecision> rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, true);
         ClusterState clusterState = rebalance.v1();
         MoveDecision rebalanceDecision = rebalance.v2();
-        assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(Type.YES, rebalanceDecision.getClusterRebalanceDecision().type());
         assertNotNull(rebalanceDecision.getExplanation());
         assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
     }
@@ -136,7 +136,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         Tuple<ClusterState, MoveDecision> rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, false);
         ClusterState clusterState = rebalance.v1();
         MoveDecision rebalanceDecision = rebalance.v2();
-        assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(Type.YES, rebalanceDecision.getClusterRebalanceDecision().type());
         assertEquals(AllocationDecision.NO, rebalanceDecision.getAllocationDecision());
         assertThat(rebalanceDecision.getExplanation(), startsWith(
             "cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance"));
@@ -161,7 +161,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         Tuple<ClusterState, MoveDecision> rebalance = setupStateAndRebalance(canAllocateDecider, balancerSettings, false);
         ClusterState clusterState = rebalance.v1();
         MoveDecision rebalanceDecision = rebalance.v2();
-        assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(Type.YES, rebalanceDecision.getClusterRebalanceDecision().type());
         assertEquals(AllocationDecision.NO, rebalanceDecision.getAllocationDecision());
         assertNotNull(rebalanceDecision.getExplanation());
         assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
@@ -232,7 +232,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         routingAllocation = newRoutingAllocation(new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
         routingAllocation.debugDecision(true);
         ShardRouting shard = clusterState.getRoutingNodes().activePrimary(shardToRebalance.shardId());
-        MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
+        MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
         assertEquals(shardToRebalance.relocatingNodeId(), rebalanceDecision.getTargetNode().getId());
         // make sure all excluded nodes returned a NO decision
         for (NodeAllocationResult nodeResult : rebalanceDecision.getNodeDecisions()) {
@@ -325,7 +325,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings);
         RoutingAllocation routingAllocation = newRoutingAllocation(
             new AllocationDeciders(Settings.EMPTY, Arrays.asList(allocationDecider, rebalanceDecider)), clusterState);
-        return allocator.decideRebalance(shardRouting, routingAllocation);
+        return allocator.decideShardAllocation(shardRouting, routingAllocation).getMoveDecision();
     }
 
     private ClusterState addNodesToClusterState(ClusterState clusterState, int numNodesToAdd) {
@@ -357,7 +357,7 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
         RoutingAllocation routingAllocation = newRoutingAllocation(
             new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
-        MoveDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
+        MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
 
         if (rebalanceExpected == false) {
             assertAssignedNodeRemainsSame(allocator, routingAllocation, shard);
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java
index f066b655269..c5474803908 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java
@@ -72,7 +72,6 @@ public class MoveDecisionTests extends ESTestCase {
         assertFalse(stay.forceMove());
         assertTrue(stay.isDecisionTaken());
         assertNull(stay.getNodeDecisions());
-        assertNotNull(stay.getExplanation());
         assertEquals(AllocationDecision.NO_ATTEMPT, stay.getAllocationDecision());
 
         stay = MoveDecision.stay(Decision.YES);
@@ -80,7 +79,6 @@ public class MoveDecisionTests extends ESTestCase {
         assertFalse(stay.forceMove());
         assertTrue(stay.isDecisionTaken());
         assertNull(stay.getNodeDecisions());
-        assertEquals("shard can remain on its current node", stay.getExplanation());
         assertEquals(AllocationDecision.NO_ATTEMPT, stay.getAllocationDecision());
     }
 
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
index f0195788e07..9e1a57a4980 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
@@ -1,20 +1,24 @@
 ---
 "cluster shard allocation explanation test":
+  - skip:
+      version: " - 5.1.99"
+      reason: allocation explain api format is different in versions < 5.2.0
+
   - do:
       # there aren't any unassigned shards to explain
-      catch: /unable to find any shards to explain/
+      catch: /illegal_state_exception/
       cluster.allocation_explain: {}
 
   - do:
       indices.create:
        index: test
 
+  - match: { acknowledged: true }
+
   - do:
       cluster.state:
         metric: [ master_node ]
 
-  - set: {master_node: node_id}
-
   # This relies on there only being a single node in the test cluster, which
   # is currently true, but if this changes in the future this test will need
   # to be changed
@@ -22,24 +26,22 @@
       cluster.allocation_explain:
         body: { "index": "test", "shard": 0, "primary": true }
 
-  - match: { assigned: true }
-  # - match: { assigned_node_id: $node_id }
-  - is_true: assigned_node_id
-  - match: { shard.index: "test" }
-  - match: { shard.id: 0 }
-  - match: { shard.primary: true }
-  # unfortunately can't test these because they break with multi-node backwords
-  # compat REST tests
-  # - is_true: nodes.$node_id.node_name
-  # - match: { nodes.$node_id.node_attributes.testattr: "test" }
-  # - match: { nodes.$node_id.node_attributes.portsfile: "true" }
-  # - match: { nodes.$node_id.final_decision: "CURRENTLY_ASSIGNED" }
-  # - match: { nodes.$node_id.weight: 0.0 }
-  # - match: { nodes.$node_id.decisions.0.decider: "same_shard" }
-  # - match: { nodes.$node_id.decisions.0.decision: "NO" }
+  - match: { current_state: "started" }
+  - is_true: current_node.id
+  - match: { index: "test" }
+  - match: { shard: 0 }
+  - match: { primary: true }
+  - match: { can_remain_on_current_node: "yes" }
+  - match: { can_rebalance_cluster: "no" }
+  - match: { can_rebalance_to_other_node: "no" }
+  - match: { rebalance_explanation: "rebalancing is not allowed" }
 
 ---
 "cluster shard allocation explanation test with empty request":
+  - skip:
+      version: " - 5.1.99"
+      reason: allocation explain api format is different in versions < 5.2.0
+
   - do:
       indices.create:
         index: test
@@ -49,22 +51,16 @@
       cluster.state:
         metric: [ master_node ]
 
-  - set: {master_node: node_id}
-
   - do:
       cluster.allocation_explain:
         include_disk_info: true
 
-  - match: { assigned: false }
+  - match: { current_state: "unassigned" }
   - match: { unassigned_info.reason: "INDEX_CREATED" }
  - is_true: unassigned_info.at
-  - match: { shard.index: "test" }
-  - match: { shard.id: 0 }
-  - match: { shard.primary: false }
+  - match: { index: "test" }
+  - match: { shard: 0 }
+  - match: { primary: false }
   - is_true: cluster_info
-  # - is_true: nodes.$node_id.node_name
-  # - match: { nodes.$node_id.node_attributes.testattr: "test" }
-  # - match: { nodes.$node_id.node_attributes.portsfile: "true" }
-  # - match: { nodes.$node_id.final_decision: "NO" }
-  # - match: { nodes.$node_id.decisions.0.decider: "same_shard" }
-  # - match: { nodes.$node_id.decisions.0.decision: "NO" }
+  - match: { can_allocate: "no" }
+  - match: { allocate_explanation : "cannot allocate because allocation is not permitted to any of the nodes" }

From 0d40608dc807afad321917929717284639b8f9b9 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 2 Jan 2017 14:03:52 -0500
Subject: [PATCH 051/119] Remove leftover checkstyle suppression

This commit removes a leftover checkstyle suppression for a source file
that was temporarily forked into the codebase to hack around a bug in
Log4j. When that source file was removed, the suppression was left
behind.
--- buildSrc/src/main/resources/checkstyle_suppressions.xml | 3 --- 1 file changed, 3 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 969f7580fea..258dafae20d 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -10,9 +10,6 @@ - - - From 47907b7093e518f204ae563f567df1b8475d03f8 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Mon, 2 Jan 2017 15:23:24 -0500 Subject: [PATCH 052/119] [TEST] fix explain API test to allow for either awaiting info state or no valid shard copy --- .../admin/cluster/allocation/ClusterAllocationExplainIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index f4ce1ec1baa..99b729ee1d9 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -109,7 +109,8 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { // verify decision objects assertTrue(allocateDecision.isDecisionTaken()); assertFalse(moveDecision.isDecisionTaken()); - assertEquals(AllocationDecision.NO_VALID_SHARD_COPY, allocateDecision.getAllocationDecision()); + assertTrue(allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY + || allocateDecision.getAllocationDecision() == AllocationDecision.AWAITING_INFO); assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be " + "found on the nodes in the cluster", allocateDecision.getExplanation()); assertNull(allocateDecision.getAllocationId()); From 49298c16a9c39869f67e113dd5f8244e3094f0b0 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Mon, 2 Jan 2017 18:18:09 -0500 Subject: [PATCH 053/119] [TEST] fix explain API awaiting info explanation check --- .../cluster/allocation/ClusterAllocationExplainIT.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 99b729ee1d9..c7f67132bdd 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -111,8 +111,13 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { assertFalse(moveDecision.isDecisionTaken()); assertTrue(allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY || allocateDecision.getAllocationDecision() == AllocationDecision.AWAITING_INFO); - assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be " + - "found on the nodes in the cluster", allocateDecision.getExplanation()); + if (allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY) { + assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be " + + "found on the nodes in the cluster", allocateDecision.getExplanation()); + } else { + 
assertEquals("cannot allocate because information about existing shard data is still being retrieved from some of the nodes", + allocateDecision.getExplanation()); + } assertNull(allocateDecision.getAllocationId()); assertNull(allocateDecision.getTargetNode()); assertEquals(0L, allocateDecision.getConfiguredDelayInMillis()); From 91917d6e913329ef49b8ee9009a2094556d6e9b4 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Mon, 2 Jan 2017 18:26:19 -0500 Subject: [PATCH 054/119] [TEST] mute backwards compatability tests for explain API until 5.2 snapshot builds can be published again --- .../test/cluster.allocation_explain/10_basic.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml index 9e1a57a4980..06f9c9e12c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml @@ -1,7 +1,8 @@ --- "cluster shard allocation explanation test": - skip: - version: " - 5.1.99" + # Set to 6.0.0 for now because the 5.2 snapshot builds are failing, causing these tests to run against an old 5.2 build + version: " - 6.0.0" reason: allocation explain api format is different in versions < 5.2.0 - do: @@ -39,7 +40,8 @@ --- "cluster shard allocation explanation test with empty request": - skip: - version: " - 5.1.99" + # Set to 6.0.0 for now because the 5.2 snapshot builds are failing, causing these tests to run against an old 5.2 build + version: " - 6.0.0" reason: allocation explain api format is different in versions < 5.2.0 - do: From 38427c1df097f7bdad1bec141a6158a52a1ff6f3 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Mon, 2 Jan 2017 18:31:17 -0500 Subject: [PATCH 055/119] [TEST] don't wait for all cluster info in the explain API, just assert an upper and lower bound --- .../allocation/ClusterAllocationExplainIT.java | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index c7f67132bdd..74a710aed2a 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -284,14 +284,6 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { boolean includeYesDecisions = randomBoolean(); boolean includeDiskInfo = randomBoolean(); - assertBusy(() -> { - if (includeDiskInfo) { - // wait till all cluster info is ready - assertEquals(3, client().admin().cluster().prepareAllocationExplain() - .setIndex("idx").setShard(0).setPrimary(true).setIncludeDiskInfo(true).get() - .getExplanation().getClusterInfo().getNodeLeastAvailableDiskUsages().size()); - } - }); ClusterAllocationExplanation explanation = runExplain(false, includeYesDecisions, includeDiskInfo); ShardId shardId = explanation.getShard(); @@ -928,14 +920,6 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { boolean includeYesDecisions = randomBoolean(); boolean includeDiskInfo = randomBoolean(); - assertBusy(() -> { - if (includeDiskInfo) { - // wait till all cluster info is ready - assertEquals(3, 
client().admin().cluster().prepareAllocationExplain() - .setIndex("idx").setShard(0).setPrimary(true).setIncludeDiskInfo(true).get() - .getExplanation().getClusterInfo().getNodeLeastAvailableDiskUsages().size()); - } - }); ClusterAllocationExplanation explanation = runExplain(false, replicaNode().getId(), includeYesDecisions, includeDiskInfo); ShardId shardId = explanation.getShard(); From 6ad5486e6b605249519fd1b47b816f2a66042902 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 3 Jan 2017 12:20:17 +1100 Subject: [PATCH 056/119] Implement Comparable in Version (#22378) Supports using streams to calculate min/max of a collection of Versions, etc. --- core/src/main/java/org/elasticsearch/Version.java | 8 +++++++- core/src/test/java/org/elasticsearch/VersionTests.java | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 474b220a9c5..bac6ccbf3fd 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -28,8 +28,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.IOException; +import java.util.Comparator; -public class Version { +public class Version implements Comparable { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 @@ -310,6 +311,11 @@ public class Version { return version.id >= id; } + @Override + public int compareTo(Version other) { + return Integer.compare(this.id, other.id); + } + /** * Returns the minimum compatible version based on the current * version. 
Ie a node needs to have at least the return version in order diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 1c9964547b0..35c36bee643 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -38,7 +38,10 @@ import static org.elasticsearch.Version.V_5_0_0_alpha1; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.sameInstance; public class VersionTests extends ESTestCase { @@ -64,6 +67,10 @@ public class VersionTests extends ESTestCase { assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); + + assertThat(V_2_2_0, is(lessThan(V_5_0_0_alpha1))); + assertThat(V_2_2_0.compareTo(V_2_2_0), is(0)); + assertThat(V_5_0_0_alpha1, is(greaterThan(V_2_2_0))); } public void testMin() { From 16d79842ac3aff292c65b2954ba7786a995b80fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 2 Jan 2017 11:37:29 +0100 Subject: [PATCH 057/119] Remove getters and setters for "minimumNumberShouldMatch" in BoolQueryBuilder Currently we have getters an setters for both "minimumNumberShouldMatch" and "minimumShouldMatch", which both access the same internal value (minimumShouldMatch). Since we only document the `minimum_should_match` parameter for the query DSL, I think we can deprecate the other getters and setters for 5.x and remove with 6.0, also deprecating the `minimum_number_should_match` query DSL parameter. 
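For illustration only — a hypothetical usage sketch, not part of the patch (the field names are invented) — the two surviving setters cover both the integer and the "special syntax" forms:

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// minimal sketch: both overloads write the same internal minimumShouldMatch
// value that minimumNumberShouldMatch(...) used to set
BoolQueryBuilder query = QueryBuilders.boolQuery()
    .should(QueryBuilders.termQuery("gender", "male"))
    .should(QueryBuilders.termQuery("gender", "female"))
    .minimumShouldMatch(1);       // integer form
query.minimumShouldMatch("75%");  // special syntax form, e.g. a percentage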
--- .../index/query/BoolQueryBuilder.java | 47 +++++-------------- .../index/query/BoolQueryBuilderTests.java | 8 ++-- .../fetch/subphase/MatchedQueriesIT.java | 2 +- .../search/query/SearchQueryIT.java | 12 ++--- 4 files changed, 23 insertions(+), 46 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index e5416c131a6..0a737910cea 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -56,8 +56,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { private static final String SHOULD = "should"; private static final String MUST = "must"; private static final ParseField DISABLE_COORD_FIELD = new ParseField("disable_coord"); - private static final ParseField MINIMUM_SHOULD_MATCH = new ParseField("minimum_should_match"); - private static final ParseField MINIMUM_NUMBER_SHOULD_MATCH = new ParseField("minimum_number_should_match"); + private static final ParseField MINIMUM_SHOULD_MATCH = new ParseField("minimum_should_match", "minimum_number_should_match"); private static final ParseField ADJUST_PURE_NEGATIVE = new ParseField("adjust_pure_negative"); private final List mustClauses = new ArrayList<>(); @@ -167,7 +166,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { * MUST clauses one or more SHOULD clauses must match a document * for the BooleanQuery to match. No null value allowed. * - * @see #minimumNumberShouldMatch(int) + * @see #minimumShouldMatch(int) */ public BoolQueryBuilder should(QueryBuilder queryBuilder) { if (queryBuilder == null) { @@ -181,7 +180,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { * Gets the list of clauses that should be matched by the returned documents. * * @see #should(QueryBuilder) - * @see #minimumNumberShouldMatch(int) + * @see #minimumShouldMatch(int) */ public List should() { return this.shouldClauses; @@ -202,34 +201,6 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { return this.disableCoord; } - /** - * Specifies a minimum number of the optional (should) boolean clauses which must be satisfied. - *
<p>
- * By default no optional clauses are necessary for a match - * (unless there are no required clauses). If this method is used, - * then the specified number of clauses is required. - *
<p>
- * Use of this method is totally independent of specifying that - * any specific clauses are required (or prohibited). This number will - * only be compared against the number of matching optional clauses. - * - * @param minimumNumberShouldMatch the number of optional clauses that must match - */ - public BoolQueryBuilder minimumNumberShouldMatch(int minimumNumberShouldMatch) { - this.minimumShouldMatch = Integer.toString(minimumNumberShouldMatch); - return this; - } - - - /** - * Specifies a minimum number of the optional (should) boolean clauses which must be satisfied. - * @see BoolQueryBuilder#minimumNumberShouldMatch(int) - */ - public BoolQueryBuilder minimumNumberShouldMatch(String minimumNumberShouldMatch) { - this.minimumShouldMatch = minimumNumberShouldMatch; - return this; - } - /** * @return the string representation of the minimumShouldMatch settings for this query */ @@ -245,6 +216,14 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { return this; } + /** + * Sets the minimum should match as an integer value. + */ + public BoolQueryBuilder minimumShouldMatch(int minimumShouldMatch) { + this.minimumShouldMatch = Integer.toString(minimumShouldMatch); + return this; + } + /** * Returns true iff this query builder has at least one should, must, must not or filter clause. * Otherwise false. @@ -364,8 +343,6 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { minimumShouldMatch = parser.textOrNull(); } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); - } else if (MINIMUM_NUMBER_SHOULD_MATCH.match(currentFieldName)) { - minimumShouldMatch = parser.textOrNull(); } else if (ADJUST_PURE_NEGATIVE.match(currentFieldName)) { adjustPureNegative = parser.booleanValue(); } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName)) { @@ -391,7 +368,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { boolQuery.boost(boost); boolQuery.disableCoord(disableCoord); boolQuery.adjustPureNegative(adjustPureNegative); - boolQuery.minimumNumberShouldMatch(minimumShouldMatch); + boolQuery.minimumShouldMatch(minimumShouldMatch); boolQuery.queryName(queryName); return boolQuery; } diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index ab73551d3e1..833ec6c5be3 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -58,7 +58,7 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase Date: Tue, 3 Jan 2017 12:36:28 +0100 Subject: [PATCH 058/119] Eliminate unneccessary declaration of IOException With this commit we remove the declaration of IOException from assertWarnings and modify all call sites. 
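A hypothetical caller sketch (not from the patch; the exact warning text is illustrative) of a test inside an ESTestCase subclass that no longer needs its own throws clause just for the warnings check:

// minimal sketch, assuming the usual ESTestCase helpers are in scope
public void testDeprecatedName() {  // no "throws IOException" needed any more
    ParseField field = new ParseField("foo_bar").withDeprecation("barFoo");
    assertTrue(field.match("barFoo"));  // matching a deprecated name emits the warning
    assertWarnings("Deprecated field [barFoo] used, expected [foo_bar] instead");
}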
Checked with @javanna --- .../elasticsearch/common/ParseFieldTests.java | 6 ++--- .../index/analysis/AnalysisRegistryTests.java | 4 +-- .../index/mapper/DynamicTemplateTests.java | 3 +-- .../index/mapper/MapperServiceTests.java | 2 +- .../mustache/TemplateQueryBuilderTests.java | 2 +- .../file/FileBasedDiscoveryPluginTests.java | 6 ++--- .../org/elasticsearch/test/ESTestCase.java | 26 ++++++++++++------- 7 files changed, 27 insertions(+), 22 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java index 59b37f9e9e6..ab70bd6ecae 100644 --- a/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java +++ b/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java @@ -20,15 +20,13 @@ package org.elasticsearch.common; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; - import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.collection.IsArrayContainingInAnyOrder.arrayContainingInAnyOrder; public class ParseFieldTests extends ESTestCase { - public void testParse() throws IOException { + public void testParse() { String name = "foo_bar"; ParseField field = new ParseField(name); String[] deprecated = new String[]{"barFoo", "bar_foo", "Foobar"}; @@ -48,7 +46,7 @@ public class ParseFieldTests extends ESTestCase { } } - public void testAllDeprecated() throws IOException { + public void testAllDeprecated() { String name = "like_text"; String[] deprecated = new String[]{"text", "same_as_text"}; ParseField field = new ParseField(name).withDeprecation(deprecated).withAllDeprecated("like"); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 432ff5247b5..9d9631e1b00 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -105,7 +105,7 @@ public class AnalysisRegistryTests extends ESTestCase { assertTrue(e.getMessage().contains("[index.analysis.analyzer.default_index] is not supported")); } - public void testBackCompatOverrideDefaultIndexAnalyzer() throws IOException { + public void testBackCompatOverrideDefaultIndexAnalyzer() { Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1)); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); @@ -128,7 +128,7 @@ public class AnalysisRegistryTests extends ESTestCase { assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); } - public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() throws IOException { + public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() { Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1)); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java index 42dd8c3bf37..ffc921d013a 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.DynamicTemplate.XContentFieldType; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -44,7 +43,7 @@ public class DynamicTemplateTests extends ESTestCase { assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage()); } - public void testParseUnknownMatchType() throws IOException { + public void testParseUnknownMatchType() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "short"); templateDef.put("mapping", Collections.singletonMap("store", true)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 7dc29f6b4ce..68e69c1c992 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -160,7 +160,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase { assertThat(e.getMessage(), containsString("Limit of mapping depth [1] in index [test1] has been exceeded")); } - public void testUnmappedFieldType() throws IOException { + public void testUnmappedFieldType() { MapperService mapperService = createIndex("index").mapperService(); assertThat(mapperService.unmappedFieldType("keyword"), instanceOf(KeywordFieldType.class)); assertThat(mapperService.unmappedFieldType("long"), instanceOf(NumberFieldType.class)); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java index 4334de090c2..3b70c5df626 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java @@ -63,7 +63,7 @@ public class TemplateQueryBuilderTests extends AbstractQueryTestCase warnings = threadContext.getResponseHeaders().get(DeprecationLogger.WARNING_HEADER); assertNull("unexpected warning headers", warnings); } finally { - DeprecationLogger.removeThreadContext(this.threadContext); - this.threadContext.close(); + resetDeprecationLogger(); } } - protected final void assertWarnings(String... expectedWarnings) throws IOException { + protected final void assertWarnings(String... 
expectedWarnings) { if (enableWarningsCheck() == false) { throw new IllegalStateException("unable to check warning headers if the test is not set to do so"); } @@ -313,14 +311,24 @@ public abstract class ESTestCase extends LuceneTestCase { assertThat(actualWarnings, hasItem(equalTo(msg))); } } finally { - // "clear" current warning headers by setting a new ThreadContext - DeprecationLogger.removeThreadContext(this.threadContext); - this.threadContext.close(); - this.threadContext = new ThreadContext(Settings.EMPTY); - DeprecationLogger.setThreadContext(this.threadContext); + resetDeprecationLogger(); } } + private void resetDeprecationLogger() { + // "clear" current warning headers by setting a new ThreadContext + DeprecationLogger.removeThreadContext(this.threadContext); + try { + this.threadContext.close(); + // catch IOException to avoid that call sites have to deal with it. It is only declared because this class implements Closeable + // but it is impossible that this implementation will ever throw an IOException. + } catch (IOException ex) { + throw new AssertionError("IOException thrown while closing deprecation logger's thread context", ex); + } + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(this.threadContext); + } + private static final List statusData = new ArrayList<>(); static { // ensure that the status logger is set to the warn level so we do not miss any warnings with our Log4j usage From 96ba45e3100d13c40b31b1f0f3259e22b0b03ed4 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Jan 2017 08:15:00 -0500 Subject: [PATCH 059/119] Fix stale comment in Netty4Utils We previously named the thread using a frame from the stack trace, but this was removed to simplify the code here. However, the comment explaining this was left behind and this commit cleans that up. --- .../java/org/elasticsearch/transport/netty4/Netty4Utils.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 08d825a354d..4d28bf9a257 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -133,8 +133,7 @@ public class Netty4Utils { * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit, so we give the thread a name based on the previous stack - * frame so that at least we know where it came from (in case logging the current stack trace fails). + * the exception so as to not lose the original cause during exit. 
*/ try { // try to log the current stack trace From a773d46c6941781fd75e130217ff42500a6a588b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 3 Jan 2017 13:36:42 +0100 Subject: [PATCH 060/119] Remove deprecated `minimum_number_should_match` in BoolQueryBuilder After deprecating getters and setters and the query DSL parameter in 5.x, support for `minimum_number_should_match` can be removed entirely. Also consolidated comments with the ones on 5.x branch and added an entry to the migration docs. --- .../index/query/BoolQueryBuilder.java | 17 ++++++++++++++--- .../migration/migrate_6_0/search.asciidoc | 3 +++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index 0a737910cea..5eb8b81009c 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -56,7 +56,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { private static final String SHOULD = "should"; private static final String MUST = "must"; private static final ParseField DISABLE_COORD_FIELD = new ParseField("disable_coord"); - private static final ParseField MINIMUM_SHOULD_MATCH = new ParseField("minimum_should_match", "minimum_number_should_match"); + private static final ParseField MINIMUM_SHOULD_MATCH = new ParseField("minimum_should_match"); private static final ParseField ADJUST_PURE_NEGATIVE = new ParseField("adjust_pure_negative"); private final List mustClauses = new ArrayList<>(); @@ -209,7 +209,8 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { } /** - * Sets the minimum should match using the special syntax (for example, supporting percentage). + * Sets the minimum should match parameter using the special syntax (for example, supporting percentage). + * @see BoolQueryBuilder#minimumShouldMatch(int) */ public BoolQueryBuilder minimumShouldMatch(String minimumShouldMatch) { this.minimumShouldMatch = minimumShouldMatch; @@ -217,7 +218,17 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { } /** - * Sets the minimum should match as an integer value. + * Specifies a minimum number of the optional (should) boolean clauses which must be satisfied. + *
<p>
+ * By default no optional clauses are necessary for a match + * (unless there are no required clauses). If this method is used, + * then the specified number of clauses is required. + *
<p>
+ * Use of this method is totally independent of specifying that + * any specific clauses are required (or prohibited). This number will + * only be compared against the number of matching optional clauses. + * + * @param minimumShouldMatch the number of optional clauses that must match */ public BoolQueryBuilder minimumShouldMatch(int minimumShouldMatch) { this.minimumShouldMatch = Integer.toString(minimumShouldMatch); diff --git a/docs/reference/migration/migrate_6_0/search.asciidoc b/docs/reference/migration/migrate_6_0/search.asciidoc index bc90b90c8b2..bae77b67372 100644 --- a/docs/reference/migration/migrate_6_0/search.asciidoc +++ b/docs/reference/migration/migrate_6_0/search.asciidoc @@ -26,6 +26,9 @@ * Support for empty query objects (`{ }`) has been removed from the query DSL. An error is thrown whenever an empty query object is provided. +* The deprecated `minimum_number_should_match` parameter in the `bool` query has + been removed, use `minimum_should_match` instead. + ==== Search shards API The search shards API no longer accepts the `type` url parameter, which didn't From 71d6a37032af8af972edcf95a396f1138725ec4e Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 3 Jan 2017 15:24:53 +0100 Subject: [PATCH 061/119] [TEST] assign blacklistPathMatchers only after the contexts have been assigned There could be an issue creating the REST clients and/or making the first request to the external cluster. If that happens, the blacklist has already been assigned and the following tests will fail because of an assertion that checks that the blacklist is not already assigned when the contexts are not. --- .../test/rest/yaml/ESClientYamlSuiteTestCase.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index f025056a4e3..a45d92c9699 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -112,11 +112,6 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { if (restTestExecutionContext == null) { assert adminExecutionContext == null; assert blacklistPathMatchers == null; - String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); - blacklistPathMatchers = new ArrayList<>(); - for (String entry : blacklist) { - blacklistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); - } String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH); ClientYamlSuiteRestSpec restSpec = null; FileSystem fileSystem = getFileSystem(); @@ -149,6 +144,11 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { ClientYamlTestClient clientYamlTestClient = new ClientYamlTestClient(restSpec, restClient, hosts, esVersion); restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient); adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient); + String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); + blacklistPathMatchers = new ArrayList<>(); + for (String entry : blacklist) { + blacklistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); + } } assert restTestExecutionContext != null; assert adminExecutionContext != null; From f086d1d3db89e1aa0de38fac1185415da845f797 Mon Sep 17 00:00:00 2001 From: Jason Tedor 
Date: Tue, 3 Jan 2017 09:27:53 -0500 Subject: [PATCH 062/119] Cleanup comments in LocalCheckpointService.java This commit cleans up the comments in LocalCheckpointService, making them uniform in their formatting and taking advantage of the line-length limit of 140 characters. --- .../index/seqno/LocalCheckpointService.java | 117 +++++++++++------- 1 file changed, 73 insertions(+), 44 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java b/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java index a03daf2ff23..73171c7737a 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.index.seqno; import org.apache.lucene.util.FixedBitSet; @@ -27,46 +28,53 @@ import org.elasticsearch.index.shard.ShardId; import java.util.LinkedList; /** - * This class generates sequences numbers and keeps track of the so called local checkpoint - the highest number for which - * all previous seqNo have been processed (including) + * This class generates sequences numbers and keeps track of the so called local checkpoint - the highest number for which all previous + * sequence numbers have been processed (inclusive). */ public class LocalCheckpointService extends AbstractIndexShardComponent { /** - * we keep a bit for each seq No that is still pending. to optimize allocation, we do so in multiple arrays - * allocating them on demand and cleaning up while completed. This setting controls the size of the arrays + * We keep a bit for each sequence number that is still pending. To optimize allocation, we do so in multiple arrays allocating them on + * demand and cleaning up while completed. This setting controls the size of the arrays. */ - public static Setting SETTINGS_BIT_ARRAYS_SIZE = Setting.intSetting("index.seq_no.checkpoint.bit_arrays_size", 1024, - 4, Setting.Property.IndexScope); + public static Setting SETTINGS_BIT_ARRAYS_SIZE = + Setting.intSetting("index.seq_no.checkpoint.bit_arrays_size", 1024, 4, Setting.Property.IndexScope); /** - * an ordered list of bit arrays representing pending seq nos. The list is "anchored" in {@link #firstProcessedSeqNo} - * which marks the seqNo the fist bit in the first array corresponds to. + * An ordered list of bit arrays representing pending sequence numbers. The list is "anchored" in {@link #firstProcessedSeqNo} which + * marks the sequence number the fist bit in the first array corresponds to. + */ + final LinkedList processedSeqNo = new LinkedList<>(); + + /** + * The size of each bit set representing processed sequence numbers. */ - final LinkedList processedSeqNo; private final int bitArraysSize; + + /** + * The sequence number that the first bit in the first array corresponds to. + */ long firstProcessedSeqNo; - /** the current local checkpoint, i.e., all seqNo lower (<=) than this number have been completed */ + /** + * The current local checkpoint, i.e., all sequence numbers no more than this number have been completed. + */ volatile long checkpoint; - /** the next available seqNo - used for seqNo generation */ + /** + * The next available sequence number. + */ private volatile long nextSeqNo; /** - * Initialize the local checkpoint service. 
The {@code maxSeqNo} should be - * set to the last sequence number assigned by this shard, or - * {@link SequenceNumbersService#NO_OPS_PERFORMED} and - * {@code localCheckpoint} should be set to the last known local checkpoint - * for this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED}. + * Initialize the local checkpoint service. The {@code maxSeqNo} should be set to the last sequence number assigned by this shard, or + * {@link SequenceNumbersService#NO_OPS_PERFORMED} and {@code localCheckpoint} should be set to the last known local checkpoint for this + * shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED}. * - * @param shardId the shard this service is providing tracking - * local checkpoints for + * @param shardId the shard this service is providing tracking local checkpoints for * @param indexSettings the index settings - * @param maxSeqNo the last sequence number assigned by this shard, or - * {@link SequenceNumbersService#NO_OPS_PERFORMED} - * @param localCheckpoint the last known local checkpoint for this shard, or - * {@link SequenceNumbersService#NO_OPS_PERFORMED} + * @param maxSeqNo the last sequence number assigned by this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} + * @param localCheckpoint the last known local checkpoint for this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} */ LocalCheckpointService(final ShardId shardId, final IndexSettings indexSettings, final long maxSeqNo, final long localCheckpoint) { super(shardId, indexSettings); @@ -80,52 +88,63 @@ public class LocalCheckpointService extends AbstractIndexShardComponent { "max seq. no. must be non-negative or [" + SequenceNumbersService.NO_OPS_PERFORMED + "] but was [" + maxSeqNo + "]"); } bitArraysSize = SETTINGS_BIT_ARRAYS_SIZE.get(indexSettings.getSettings()); - processedSeqNo = new LinkedList<>(); firstProcessedSeqNo = localCheckpoint == SequenceNumbersService.NO_OPS_PERFORMED ? 0 : localCheckpoint + 1; - this.nextSeqNo = maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED ? 0 : maxSeqNo + 1; - this.checkpoint = localCheckpoint; + nextSeqNo = maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED ? 0 : maxSeqNo + 1; + checkpoint = localCheckpoint; } /** - * issue the next sequence number - **/ + * Issue the next sequence number. + * + * @return the next assigned sequence number + */ synchronized long generateSeqNo() { return nextSeqNo++; } /** - * marks the processing of the given seqNo have been completed - **/ - synchronized void markSeqNoAsCompleted(long seqNo) { - // make sure we track highest seen seqNo + * Marks the processing of the provided sequence number as completed as updates the checkpoint if possible. + * + * @param seqNo the sequence number to mark as completed + */ + synchronized void markSeqNoAsCompleted(final long seqNo) { + // make sure we track highest seen sequence number if (seqNo >= nextSeqNo) { nextSeqNo = seqNo + 1; } if (seqNo <= checkpoint) { - // this is possible during recovery where we might replay an op that was also replicated + // this is possible during recovery where we might replay an operation that was also replicated return; } - FixedBitSet bitSet = getBitSetForSeqNo(seqNo); - int offset = seqNoToBitSetOffset(seqNo); + final FixedBitSet bitSet = getBitSetForSeqNo(seqNo); + final int offset = seqNoToBitSetOffset(seqNo); bitSet.set(offset); if (seqNo == checkpoint + 1) { updateCheckpoint(); } } - /** gets the current check point */ + /** + * The current checkpoint which can be advanced by {@link #markSeqNoAsCompleted(long)}. 
+ * + * @return the current checkpoint + */ public long getCheckpoint() { return checkpoint; } - /** gets the maximum seqno seen so far */ + /** + * The maximum sequence number issued so far. + * + * @return the maximum sequence number + */ long getMaxSeqNo() { return nextSeqNo - 1; } /** - * moves the checkpoint to the last consecutively processed seqNo - * Note: this method assumes that the seqNo following the current checkpoint is processed. + * Moves the checkpoint to the last consecutively processed sequence number. This method assumes that the sequence number following the + * current checkpoint is processed. */ private void updateCheckpoint() { assert Thread.holdsLock(this); @@ -135,7 +154,7 @@ public class LocalCheckpointService extends AbstractIndexShardComponent { "checkpoint + 1 doesn't point to the first bit set (o.w. current bit set is completed and shouldn't be there)"; assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1)) : "updateCheckpoint is called but the bit following the checkpoint is not set"; - // keep it simple for now, get the checkpoint one by one. in the future we can optimize and read words + // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words FixedBitSet current = processedSeqNo.getFirst(); do { checkpoint++; @@ -151,14 +170,18 @@ public class LocalCheckpointService extends AbstractIndexShardComponent { } /** - * gets the bit array for the given seqNo, allocating new ones if needed. + * Return the bit array for the provided sequence number, possibly allocating a new array if needed. + * + * @param seqNo the sequence number to obtain the bit array for + * @return the bit array corresponding to the provided sequence number */ - private FixedBitSet getBitSetForSeqNo(long seqNo) { + private FixedBitSet getBitSetForSeqNo(final long seqNo) { assert Thread.holdsLock(this); assert seqNo >= firstProcessedSeqNo : "seqNo: " + seqNo + " firstProcessedSeqNo: " + firstProcessedSeqNo; final long bitSetOffset = (seqNo - firstProcessedSeqNo) / bitArraysSize; if (bitSetOffset > Integer.MAX_VALUE) { - throw new IndexOutOfBoundsException("seqNo too high. got [" + seqNo + "], firstProcessedSeqNo [" + firstProcessedSeqNo + "]"); + throw new IndexOutOfBoundsException( + "sequence number too high; got [" + seqNo + "], firstProcessedSeqNo [" + firstProcessedSeqNo + "]"); } while (bitSetOffset >= processedSeqNo.size()) { processedSeqNo.add(new FixedBitSet(bitArraysSize)); @@ -166,8 +189,14 @@ public class LocalCheckpointService extends AbstractIndexShardComponent { return processedSeqNo.get((int)bitSetOffset); } - /** maps the given seqNo to a position in the bit set returned by {@link #getBitSetForSeqNo} */ - private int seqNoToBitSetOffset(long seqNo) { + /** + * Obtain the position in the bit array corresponding to the provided sequence number. The bit array corresponding to the sequence + * number can be obtained via {@link #getBitSetForSeqNo(long)}. 
+ * + * @param seqNo the sequence number to obtain the position for + * @return the position in the bit array corresponding to the provided sequence number + */ + private int seqNoToBitSetOffset(final long seqNo) { assert Thread.holdsLock(this); assert seqNo >= firstProcessedSeqNo; return ((int) (seqNo - firstProcessedSeqNo)) % bitArraysSize; From c5a8fd9719148b501aefe89ea3d574de6b2fdb94 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Jan 2017 09:30:31 -0500 Subject: [PATCH 063/119] Cleanup some whitespace in LocalCheckpointService.java This commit just fixes a couple whitespace formatting issues in o/e/i/s/LocalCheckpointService.java. --- .../org/elasticsearch/index/seqno/LocalCheckpointService.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java b/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java index 73171c7737a..7da833cf866 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointService.java @@ -81,7 +81,7 @@ public class LocalCheckpointService extends AbstractIndexShardComponent { if (localCheckpoint < 0 && localCheckpoint != SequenceNumbersService.NO_OPS_PERFORMED) { throw new IllegalArgumentException( "local checkpoint must be non-negative or [" + SequenceNumbersService.NO_OPS_PERFORMED + "] " - + "but was [" + localCheckpoint + "]"); + + "but was [" + localCheckpoint + "]"); } if (maxSeqNo < 0 && maxSeqNo != SequenceNumbersService.NO_OPS_PERFORMED) { throw new IllegalArgumentException( @@ -186,7 +186,7 @@ public class LocalCheckpointService extends AbstractIndexShardComponent { while (bitSetOffset >= processedSeqNo.size()) { processedSeqNo.add(new FixedBitSet(bitArraysSize)); } - return processedSeqNo.get((int)bitSetOffset); + return processedSeqNo.get((int) bitSetOffset); } /** From 41c7d3e0920b851fad990a8e4fdaa7f667ad341a Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 11:31:09 +0100 Subject: [PATCH 064/119] Remove ParseFieldMatcher usage from Mappers --- .../src/main/resources/checkstyle_suppressions.xml | 2 -- .../index/mapper/DocumentMapperParser.java | 12 ++++++------ .../org/elasticsearch/index/mapper/Mapper.java | 14 +++----------- 3 files changed, 9 insertions(+), 19 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 258dafae20d..dac28d61a8d 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -349,12 +349,10 @@ - - diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index d05cec27b2e..031f1390756 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -52,7 +51,6 @@ public class DocumentMapperParser { private final RootObjectMapper.TypeParser rootObjectTypeParser = new 
RootObjectMapper.TypeParser(); private final Version indexVersionCreated; - private final ParseFieldMatcher parseFieldMatcher; private final Map typeParsers; private final Map rootTypeParsers; @@ -60,7 +58,6 @@ public class DocumentMapperParser { public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperService, IndexAnalyzers indexAnalyzers, NamedXContentRegistry xContentRegistry, SimilarityService similarityService, MapperRegistry mapperRegistry, Supplier queryShardContextSupplier) { - this.parseFieldMatcher = new ParseFieldMatcher(indexSettings.getSettings()); this.mapperService = mapperService; this.indexAnalyzers = indexAnalyzers; this.xContentRegistry = xContentRegistry; @@ -72,7 +69,8 @@ public class DocumentMapperParser { } public Mapper.TypeParser.ParserContext parserContext(String type) { - return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier); + return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, + typeParsers::get, indexVersionCreated, queryShardContextSupplier); } public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException { @@ -109,7 +107,8 @@ public class DocumentMapperParser { Mapper.TypeParser.ParserContext parserContext = parserContext(type); // parse RootObjectMapper - DocumentMapper.Builder docBuilder = new DocumentMapper.Builder((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService); + DocumentMapper.Builder docBuilder = new DocumentMapper.Builder( + (RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService); Iterator> iterator = mapping.entrySet().iterator(); // parse DocumentMapper while(iterator.hasNext()) { @@ -143,7 +142,8 @@ public class DocumentMapperParser { } public static void checkNoRemainingFields(String fieldName, Map fieldNodeMap, Version indexVersionCreated) { - checkNoRemainingFields(fieldNodeMap, indexVersionCreated, "Mapping definition for [" + fieldName + "] has unsupported parameters: "); + checkNoRemainingFields(fieldNodeMap, indexVersionCreated, + "Mapping definition for [" + fieldName + "] has unsupported parameters: "); } public static void checkNoRemainingFields(Map fieldNodeMap, Version indexVersionCreated, String message) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 384331c2d9e..db1060d3be6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.Version; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -91,21 +90,17 @@ public abstract class Mapper implements ToXContent, Iterable { private final Version indexVersionCreated; - private final ParseFieldMatcher parseFieldMatcher; - private final Supplier queryShardContextSupplier; public ParserContext(String type, IndexAnalyzers indexAnalyzers, Function similarityLookupService, MapperService mapperService, Function typeParsers, - Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, - Supplier 
queryShardContextSupplier) { + Version indexVersionCreated, Supplier queryShardContextSupplier) { this.type = type; this.indexAnalyzers = indexAnalyzers; this.similarityLookupService = similarityLookupService; this.mapperService = mapperService; this.typeParsers = typeParsers; this.indexVersionCreated = indexVersionCreated; - this.parseFieldMatcher = parseFieldMatcher; this.queryShardContextSupplier = queryShardContextSupplier; } @@ -133,10 +128,6 @@ public abstract class Mapper implements ToXContent, Iterable { return indexVersionCreated; } - public ParseFieldMatcher parseFieldMatcher() { - return parseFieldMatcher; - } - public Supplier queryShardContextSupplier() { return queryShardContextSupplier; } @@ -156,7 +147,8 @@ public abstract class Mapper implements ToXContent, Iterable { static class MultiFieldParserContext extends ParserContext { MultiFieldParserContext(ParserContext in) { - super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContextSupplier()); + super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), + in.indexVersionCreated(), in.queryShardContextSupplier()); } } From 8f297ec42c0e6695fb5955daaa11dc5fc9ac54e8 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 11:35:38 +0100 Subject: [PATCH 065/119] Remove ParseFieldMatcher usage from ParseFieldRegistry --- .../routing/allocation/command/AllocationCommands.java | 5 ++--- .../common/xcontent/ParseFieldRegistry.java | 10 +++------- .../search/aggregations/AggregatorParsers.java | 4 ++-- .../SignificantTermsAggregationBuilder.java | 2 +- .../movavg/MovAvgPipelineAggregationBuilder.java | 3 +-- .../search/builder/SearchSourceBuilder.java | 3 +-- 6 files changed, 10 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 10ba3f55944..88109edf55c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -130,8 +130,7 @@ public class AllocationCommands extends ToXContentToBytes { * @return {@link AllocationCommands} read * @throws IOException if something bad happens while reading the stream */ - public static AllocationCommands fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher, - AllocationCommandRegistry registry) throws IOException { + public static AllocationCommands fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher, AllocationCommandRegistry registry) throws IOException { AllocationCommands commands = new AllocationCommands(); XContentParser.Token token = parser.currentToken(); @@ -160,7 +159,7 @@ public class AllocationCommands extends ToXContentToBytes { token = parser.nextToken(); String commandName = parser.currentName(); token = parser.nextToken(); - commands.add(registry.lookup(commandName, parseFieldMatcher, parser.getTokenLocation()).fromXContent(parser)); + commands.add(registry.lookup(commandName, parser.getTokenLocation()).fromXContent(parser)); // move to the end object one if (parser.nextToken() != XContentParser.Token.END_OBJECT) { throw new ElasticsearchParseException("allocation command is malformed, done parsing a command, but didn't get END_OBJECT, got [{}] 
instead", token); diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java b/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java index f0f2d759902..0282fba7646 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ParseFieldRegistry.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.collect.Tuple; @@ -75,12 +74,11 @@ public class ParseFieldRegistry { * Lookup a value from the registry by name while checking that the name matches the ParseField. * * @param name The name of the thing to look up. - * @param parseFieldMatcher to build nice error messages. * @return The value being looked up. Never null. * @throws ParsingException if the named thing isn't in the registry or the name was deprecated and deprecated names aren't supported. */ - public T lookup(String name, ParseFieldMatcher parseFieldMatcher, XContentLocation xContentLocation) { - T value = lookupReturningNullIfNotFound(name, parseFieldMatcher); + public T lookup(String name, XContentLocation xContentLocation) { + T value = lookupReturningNullIfNotFound(name); if (value == null) { throw new ParsingException(xContentLocation, "no [" + registryName + "] registered for [" + name + "]"); } @@ -91,12 +89,10 @@ public class ParseFieldRegistry { * Lookup a value from the registry by name while checking that the name matches the ParseField. * * @param name The name of the thing to look up. - * @param parseFieldMatcher The parseFieldMatcher. This is used to resolve the {@link ParseFieldMatcher} and to build nice - * error messages. * @return The value being looked up or null if it wasn't found. * @throws ParsingException if the named thing isn't in the registry or the name was deprecated and deprecated names aren't supported. */ - public T lookupReturningNullIfNotFound(String name, ParseFieldMatcher parseFieldMatcher) { + public T lookupReturningNullIfNotFound(String name) { Tuple parseFieldAndValue = registry.get(name); if (parseFieldAndValue == null) { return null; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index df64d440dc0..4fef1d5c59d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -53,7 +53,7 @@ public class AggregatorParsers { * @return The parser associated with the given aggregation type or null if it wasn't found. */ public Aggregator.Parser parser(String type, ParseFieldMatcher parseFieldMatcher) { - return aggregationParserRegistry.lookupReturningNullIfNotFound(type, parseFieldMatcher); + return aggregationParserRegistry.lookupReturningNullIfNotFound(type); } /** @@ -64,7 +64,7 @@ public class AggregatorParsers { * @return The parser associated with the given pipeline aggregator type or null if it wasn't found. 
*/ public PipelineAggregator.Parser pipelineParser(String type, ParseFieldMatcher parseFieldMatcher) { - return pipelineAggregationParserRegistry.lookupReturningNullIfNotFound(type, parseFieldMatcher); + return pipelineAggregationParserRegistry.lookupReturningNullIfNotFound(type); } /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 6205ab2e787..3042824b4de 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -91,7 +91,7 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB parser.declareObject(SignificantTermsAggregationBuilder::significanceHeuristic, (p, context) -> { SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserRegistry - .lookupReturningNullIfNotFound(name, context.getParseFieldMatcher()); + .lookupReturningNullIfNotFound(name); return significanceHeuristicParser.parse(context); }, new ParseField(name)); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java index dc445093f1d..30db30fcafd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java @@ -403,8 +403,7 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio factory.predict(predict); } if (model != null) { - MovAvgModel.AbstractModelParser modelParser = movingAverageMdelParserRegistry.lookup(model, context.getParseFieldMatcher(), - parser.getTokenLocation()); + MovAvgModel.AbstractModelParser modelParser = movingAverageMdelParserRegistry.lookup(model, parser.getTokenLocation()); MovAvgModel movAvgModel; try { movAvgModel = modelParser.parse(settings, pipelineAggregatorName, factory.window(), context.getParseFieldMatcher()); diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index b7ac57be43e..7ecd06c8c83 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -1008,8 +1008,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (token == XContentParser.Token.FIELD_NAME) { extSectionName = parser.currentName(); } else { - SearchExtParser searchExtParser = searchExtRegistry.lookup(extSectionName, - context.getParseFieldMatcher(), parser.getTokenLocation()); + SearchExtParser searchExtParser = searchExtRegistry.lookup(extSectionName, parser.getTokenLocation()); SearchExtBuilder searchExtBuilder = searchExtParser.fromXContent(parser); if (searchExtBuilder.getWriteableName().equals(extSectionName) == false) { throw new IllegalStateException("The parsed [" + searchExtBuilder.getClass().getName() + "] object has a " From 6f4faf5233ba28f5368701ca76735f227fbde897 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 
Dec 2016 11:36:32 +0100 Subject: [PATCH 066/119] Remove ParseFieldMatcher usage from AllocationCommands --- .../routing/allocation/command/AllocationCommands.java | 3 +-- .../rest/action/admin/cluster/RestClusterRerouteAction.java | 4 +--- .../cluster/routing/allocation/AllocationCommandsTests.java | 3 +-- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 88109edf55c..12a39299a04 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -130,7 +129,7 @@ public class AllocationCommands extends ToXContentToBytes { * @return {@link AllocationCommands} read * @throws IOException if something bad happens while reading the stream */ - public static AllocationCommands fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher, AllocationCommandRegistry registry) throws IOException { + public static AllocationCommands fromXContent(XContentParser parser, AllocationCommandRegistry registry) throws IOException { AllocationCommands commands = new AllocationCommands(); XContentParser.Token token = parser.currentToken(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 31eef941a8b..8534abd1882 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -37,8 +37,6 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -53,7 +51,7 @@ import java.util.Set; public class RestClusterRerouteAction extends BaseRestHandler { private static final ObjectParser PARSER = new ObjectParser<>("cluster_reroute"); static { - PARSER.declareField((p, v, c) -> v.commands(AllocationCommands.fromXContent(p, c.getParseFieldMatcher(), c.registry)), + PARSER.declareField((p, v, c) -> v.commands(AllocationCommands.fromXContent(p, c.registry)), new ParseField("commands"), ValueType.OBJECT_ARRAY); PARSER.declareBoolean(ClusterRerouteRequest::dryRun, new ParseField("dry_run")); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index f0a50811d38..21b3e01c817 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -490,7 +489,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase { parser.nextToken(); parser.nextToken(); AllocationCommandRegistry registry = NetworkModule.getAllocationCommandRegistry(); - AllocationCommands sCommands = AllocationCommands.fromXContent(parser, ParseFieldMatcher.STRICT, registry); + AllocationCommands sCommands = AllocationCommands.fromXContent(parser, registry); assertThat(sCommands.commands().size(), equalTo(5)); assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(1)); From 45c67b5ee509ea06ca584bc857fbd1b1101cf023 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 11:39:13 +0100 Subject: [PATCH 067/119] Remove ParseFieldMatcher usage from AggregatorParsers --- .../search/aggregations/AggregatorParsers.java | 12 ++++-------- .../org/elasticsearch/search/SearchModuleTests.java | 5 ++--- .../search/aggregations/BaseAggregationTestCase.java | 2 +- .../BasePipelineAggregationTestCase.java | 2 +- .../bucketmetrics/ExtendedStatsBucketTests.java | 2 +- .../bucketmetrics/PercentilesBucketTests.java | 2 +- .../pipeline/moving/avg/MovAvgTests.java | 8 ++++---- 7 files changed, 14 insertions(+), 19 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index 4fef1d5c59d..ce992e067cc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -49,10 +48,9 @@ public class AggregatorParsers { * Returns the parser that is registered under the given aggregation type. * * @param type The aggregation type - * @param parseFieldMatcher used for making error messages. * @return The parser associated with the given aggregation type or null if it wasn't found. */ - public Aggregator.Parser parser(String type, ParseFieldMatcher parseFieldMatcher) { + public Aggregator.Parser parser(String type) { return aggregationParserRegistry.lookupReturningNullIfNotFound(type); } @@ -60,10 +58,9 @@ public class AggregatorParsers { * Returns the parser that is registered under the given pipeline aggregator type. 
* * @param type The pipeline aggregator type - * @param parseFieldMatcher used for making error messages. * @return The parser associated with the given pipeline aggregator type or null if it wasn't found. */ - public PipelineAggregator.Parser pipelineParser(String type, ParseFieldMatcher parseFieldMatcher) { + public PipelineAggregator.Parser pipelineParser(String type) { return pipelineAggregationParserRegistry.lookupReturningNullIfNotFound(type); } @@ -142,10 +139,9 @@ public class AggregatorParsers { + aggregationName + "]: [" + pipelineAggregatorFactory + "] and [" + fieldName + "]"); } - Aggregator.Parser aggregatorParser = parser(fieldName, parseContext.getParseFieldMatcher()); + Aggregator.Parser aggregatorParser = parser(fieldName); if (aggregatorParser == null) { - PipelineAggregator.Parser pipelineAggregatorParser = pipelineParser(fieldName, - parseContext.getParseFieldMatcher()); + PipelineAggregator.Parser pipelineAggregatorParser = pipelineParser(fieldName); if (pipelineAggregatorParser == null) { throw new ParsingException(parser.getTokenLocation(), "Could not find aggregator type [" + fieldName + "] in [" + aggregationName + "]"); diff --git a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 89e846bdb8b..762a02bac68 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -222,7 +221,7 @@ public class SearchModuleTests extends ModuleTestCase { } })); - assertNotNull(module.getAggregatorParsers().parser("test", ParseFieldMatcher.STRICT)); + assertNotNull(module.getAggregatorParsers().parser("test")); } public void testRegisterPipelineAggregation() { @@ -233,7 +232,7 @@ public class SearchModuleTests extends ModuleTestCase { } })); - assertNotNull(module.getAggregatorParsers().pipelineParser("test", ParseFieldMatcher.STRICT)); + assertNotNull(module.getAggregatorParsers().pipelineParser("test")); } private static final String[] NON_DEPRECATED_QUERIES = new String[] { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index a39a17068f5..3b4b85fe1fb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -124,7 +124,7 @@ public abstract class BaseAggregationTestCase { @@ -115,8 +116,7 @@ public class MovAvgTests extends BasePipelineAggregationTestCase Date: Sat, 31 Dec 2016 11:47:03 +0100 Subject: [PATCH 068/119] Remove ParseFieldMatcher usage from SearchRequest --- .../java/org/elasticsearch/action/search/SearchRequest.java | 2 +- .../main/java/org/elasticsearch/action/search/SearchType.java | 4 +--- .../elasticsearch/rest/action/search/RestSearchAction.java | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index b21b0c54be3..3e65bda8ddf 100644 --- 
a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -199,7 +199,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch". */ public SearchRequest searchType(String searchType) { - return searchType(SearchType.fromString(searchType, ParseFieldMatcher.EMPTY)); + return searchType(SearchType.fromString(searchType)); } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchType.java b/core/src/main/java/org/elasticsearch/action/search/SearchType.java index 31535736957..93b78156161 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchType.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchType.java @@ -19,8 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.ParseFieldMatcher; - /** * Search type represents the manner in which the search operation is executed. * @@ -91,7 +89,7 @@ public enum SearchType { * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch", * "query_then_fetch"/"queryThenFetch" and "query_and_fetch"/"queryAndFetch". */ - public static SearchType fromString(String searchType, ParseFieldMatcher parseFieldMatcher) { + public static SearchType fromString(String searchType) { if (searchType == null) { return SearchType.DEFAULT; } diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index b0e0ce074cf..9dd64831883 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -101,8 +101,8 @@ public class RestSearchAction extends BaseRestHandler { // from the REST layer. these modes are an internal optimization and should // not be specified explicitly by the user.
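A hedged sketch of the simplified SearchType lookup after this hunk (the inputs are illustrative; the null fallback mirrors the snippet above):

[source,java]
--------------------------------------------------
// Resolving a search type is now a plain static lookup with no matcher argument.
SearchType type = SearchType.fromString("dfs_query_then_fetch");
assert type == SearchType.DFS_QUERY_THEN_FETCH;

// A null input still falls back to the default search type.
assert SearchType.fromString(null) == SearchType.DEFAULT;
--------------------------------------------------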
String searchType = request.param("search_type"); - if (SearchType.fromString(searchType, parseFieldMatcher).equals(SearchType.QUERY_AND_FETCH) || - SearchType.fromString(searchType, parseFieldMatcher).equals(SearchType.DFS_QUERY_AND_FETCH)) { + if (SearchType.fromString(searchType).equals(SearchType.QUERY_AND_FETCH) || + SearchType.fromString(searchType).equals(SearchType.DFS_QUERY_AND_FETCH)) { throw new IllegalArgumentException("Unsupported search type [" + searchType + "]"); } else { searchRequest.searchType(searchType); From 648ed46f01e98f8d67bbab207f7dbf8d6287e29a Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 11:57:47 +0100 Subject: [PATCH 069/119] Remove ParseFieldMatcher usages from MoreLikeThisQueryBuilder & MultiMatchQueryBuilder --- .../index/query/MoreLikeThisQueryBuilder.java | 7 +++---- .../elasticsearch/index/query/MultiMatchQueryBuilder.java | 7 +++---- .../index/query/MoreLikeThisQueryBuilderTests.java | 3 +-- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index 1166a0d678d..bfb579e0c32 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -36,7 +36,6 @@ import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -352,7 +351,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder(); @@ -956,7 +955,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder Date: Tue, 3 Jan 2017 13:42:48 +0100 Subject: [PATCH 070/119] Remove unused QueryParsers#setRewriteMethod --- .../elasticsearch/index/query/support/QueryParsers.java | 7 ------- 1 file changed, 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java b/core/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java index e7b980cb16a..a612ad353c3 100644 --- a/core/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java +++ b/core/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java @@ -44,13 +44,6 @@ public final class QueryParsers { query.setRewriteMethod(rewriteMethod); } - public static void setRewriteMethod(MultiTermQuery query, ParseFieldMatcher matcher, @Nullable String rewriteMethod) { - if (rewriteMethod == null) { - return; - } - query.setRewriteMethod(parseRewriteMethod(matcher, rewriteMethod)); - } - public static MultiTermQuery.RewriteMethod parseRewriteMethod(ParseFieldMatcher matcher, @Nullable String rewriteMethod) { return parseRewriteMethod(matcher, rewriteMethod, MultiTermQuery.CONSTANT_SCORE_REWRITE); } From 0d67891a64cd6048d86e0544a46933b26b8d6a6e Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 12:15:55 +0100 Subject: [PATCH 071/119] Remove ParseFieldMatcher usages from QueryParsers#parseRewriteMethod --- buildSrc/src/main/resources/checkstyle_suppressions.xml | 1 - .../org/elasticsearch/index/query/FuzzyQueryBuilder.java | 2 +- .../org/elasticsearch/index/query/MatchQueryBuilder.java | 2 +- .../index/query/MultiMatchQueryBuilder.java | 2 
+- .../elasticsearch/index/query/PrefixQueryBuilder.java | 2 +- .../index/query/QueryStringQueryBuilder.java | 4 ++-- .../elasticsearch/index/query/RegexpQueryBuilder.java | 2 +- .../elasticsearch/index/query/WildcardQueryBuilder.java | 2 +- .../elasticsearch/index/query/support/QueryParsers.java | 9 ++++----- 9 files changed, 12 insertions(+), 14 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index dac28d61a8d..c8251702484 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -386,7 +386,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index c3d892a200f..5da9edcd2a5 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -337,7 +337,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder i query = new FuzzyQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), maxEdits, prefixLength, maxExpansions, transpositions); } if (query instanceof MultiTermQuery) { - MultiTermQuery.RewriteMethod rewriteMethod = QueryParsers.parseRewriteMethod(context.getParseFieldMatcher(), rewrite, null); + MultiTermQuery.RewriteMethod rewriteMethod = QueryParsers.parseRewriteMethod(rewrite, null); QueryParsers.setRewriteMethod((MultiTermQuery) query, rewriteMethod); } return query; diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index 777182527c1..20e3137b7b7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -455,7 +455,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { matchQuery.setFuzzyPrefixLength(prefixLength); matchQuery.setMaxExpansions(maxExpansions); matchQuery.setTranspositions(fuzzyTranspositions); - matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(context.getParseFieldMatcher(), fuzzyRewrite, null)); + matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null)); matchQuery.setLenient(lenient); matchQuery.setCommonTermsCutoff(cutoffFrequency); matchQuery.setZeroTermsQuery(zeroTermsQuery); diff --git a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 706ad71752d..b668b8d9831 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -719,7 +719,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder @Override protected Query doToQuery(QueryShardContext context) throws IOException { - MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(context.getParseFieldMatcher(), rewrite, null); + MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null); Query query = null; MappedFieldType fieldType = context.fieldMapper(fieldName); diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index f60aa56d0b9..da427c9c305 100644 --- 
a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -1012,11 +1012,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder @Override protected Query doToQuery(QueryShardContext context) throws QueryShardException, IOException { - MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(context.getParseFieldMatcher(), rewrite, null); + MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null); Query query = null; MappedFieldType fieldType = context.fieldMapper(fieldName); diff --git a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index 50d9c6e6a4c..006f55c8710 100644 --- a/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -194,7 +194,7 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder Date: Sat, 31 Dec 2016 12:19:36 +0100 Subject: [PATCH 072/119] Remove ParseFieldMatcher usages from a couple of Rest Actions --- .../action/admin/indices/RestAnalyzeAction.java | 5 ++--- .../indices/RestClearIndicesCacheAction.java | 6 ++---- .../admin/indices/RestAnalyzeActionTests.java | 17 ++++++++--------- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java index bbe022a318c..ea836c50635 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; @@ -65,7 +64,7 @@ public class RestAnalyzeAction extends BaseRestHandler { AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); try (XContentParser parser = request.contentOrSourceParamParser()) { - buildFromContent(parser, analyzeRequest, parseFieldMatcher); + buildFromContent(parser, analyzeRequest); } catch (IOException e) { throw new IllegalArgumentException("Failed to parse request body", e); } @@ -73,7 +72,7 @@ public class RestAnalyzeAction extends BaseRestHandler { return channel -> client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<>(channel)); } - static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) + static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeRequest) throws IOException { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("Malformed content, must start with an object"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index d40e7759c24..f8309926882 100644 --- 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRespo import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -61,7 +60,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest( Strings.splitStringByCommaToArray(request.param("index"))); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); - fromRequest(request, clearIndicesCacheRequest, parseFieldMatcher); + fromRequest(request, clearIndicesCacheRequest); return channel -> client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener(channel) { @Override @@ -79,8 +78,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { return false; } - public static ClearIndicesCacheRequest fromRequest(final RestRequest request, ClearIndicesCacheRequest clearIndicesCacheRequest, - ParseFieldMatcher parseFieldMatcher) { + public static ClearIndicesCacheRequest fromRequest(final RestRequest request, ClearIndicesCacheRequest clearIndicesCacheRequest) { for (Map.Entry entry : request.params().entrySet()) { if (Fields.QUERY.match(entry.getKey())) { diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java index 6bf5c515df9..da52358d3c3 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; @@ -46,7 +45,7 @@ public class RestAnalyzeActionTests extends ESTestCase { AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); + RestAnalyzeAction.buildFromContent(content, analyzeRequest); assertThat(analyzeRequest.text().length, equalTo(1)); assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"})); @@ -79,7 +78,7 @@ public class RestAnalyzeActionTests extends ESTestCase { AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); + RestAnalyzeAction.buildFromContent(content, analyzeRequest); assertThat(analyzeRequest.text().length, equalTo(1)); assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"})); @@ -106,7 +105,7 @@ public class RestAnalyzeActionTests extends ESTestCase { .field("unknown", "keyword") .endObject()); IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, - () -> RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY))); + () -> RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } @@ -117,7 +116,7 @@ public class RestAnalyzeActionTests extends ESTestCase { .field("explain", "fals") .endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY))); + () -> RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest)); assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'")); } @@ -130,7 +129,7 @@ public class RestAnalyzeActionTests extends ESTestCase { .array("filters", "lowercase") .endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + new AnalyzeRequest("for test"))); assertThat(e.getMessage(), startsWith("Unknown parameter [filters]")); } @@ -142,7 +141,7 @@ public class RestAnalyzeActionTests extends ESTestCase { .array("token_filters", "lowercase") .endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + new AnalyzeRequest("for test"))); assertThat(e.getMessage(), startsWith("Unknown parameter [token_filters]")); } @@ -154,7 +153,7 @@ public class RestAnalyzeActionTests extends ESTestCase { .array("char_filters", "lowercase") .endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + new AnalyzeRequest("for test"))); assertThat(e.getMessage(), startsWith("Unknown parameter [char_filters]")); } @@ -166,7 +165,7 @@ public class RestAnalyzeActionTests extends ESTestCase { .array("token_filter", "lowercase") .endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); + new AnalyzeRequest("for test"))); assertThat(e.getMessage(), startsWith("Unknown parameter [token_filter]")); } } From 6329a98a9700d7fdb01a0a3d50019d7e5b29f541 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 12:24:39 +0100 Subject: [PATCH 073/119] Remove ParseFieldMatcher usages from SearchContext --- .../org/elasticsearch/search/DefaultSearchContext.java | 5 +---- .../java/org/elasticsearch/search/SearchService.java | 3 +-- .../bucket/sampler/DiversifiedAggregatorFactory.java | 2 +- .../aggregations/bucket/sampler/SamplerAggregator.java | 3 +-- .../significant/SignificantTermsAggregatorFactory.java | 5 ++--- .../bucket/terms/TermsAggregatorFactory.java | 5 ++--- .../search/internal/FilteredSearchContext.java | 2 -- .../elasticsearch/search/internal/SearchContext.java | 10 +--------- .../search/aggregations/AggregatorTestCase.java | 2 -- .../java/org/elasticsearch/test/TestSearchContext.java | 3 --- 10 files changed, 9 insertions(+), 31 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java 
index 0c08d6c949e..571584caaef 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -30,7 +30,6 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; @@ -152,9 +151,7 @@ final class DefaultSearchContext extends SearchContext { DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard, - BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout, - FetchPhase fetchPhase) { - super(parseFieldMatcher); + BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout, FetchPhase fetchPhase) { this.id = id; this.request = request; this.fetchPhase = fetchPhase; diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 39c029996f7..1a833da65fb 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -579,8 +579,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, - engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, - timeout, fetchPhase); + engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase); boolean success = false; try { // we clone the query shard context here just for rewriting otherwise we diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 39e5af4b78c..971c60c5eb8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -65,7 +65,7 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory< if (valuesSource instanceof ValuesSource.Bytes) { ExecutionMode execution = null; if (executionHint != null) { - execution = ExecutionMode.fromString(executionHint, context.parseFieldMatcher()); + execution = ExecutionMode.fromString(executionHint); } // In some cases using ordinals is just not supported: override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 1e8238d8de3..ca93b3603a2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java 
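The aggregator factories in this commit resolve execution hints the same way; a hedged sketch of the matcher-free lookup (the hint value is illustrative):

[source,java]
--------------------------------------------------
// Hypothetical caller: an execution hint is resolved from its name alone.
SamplerAggregator.ExecutionMode mode = SamplerAggregator.ExecutionMode.fromString("map");
--------------------------------------------------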
@@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -111,7 +110,7 @@ public class SamplerAggregator extends SingleBucketAggregator { }; - public static ExecutionMode fromString(String value, ParseFieldMatcher parseFieldMatcher) { + public static ExecutionMode fromString(String value) { for (ExecutionMode mode : values()) { if (mode.parseField.match(value)) { return mode; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index 009c49af54f..bbfa21a7b16 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.index.FilterableTermsEnum; import org.elasticsearch.common.lucene.index.FreqTermsEnum; @@ -196,7 +195,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac if (valuesSource instanceof ValuesSource.Bytes) { ExecutionMode execution = null; if (executionHint != null) { - execution = ExecutionMode.fromString(executionHint, context.parseFieldMatcher()); + execution = ExecutionMode.fromString(executionHint); } if (!(valuesSource instanceof ValuesSource.Bytes.WithOrdinals)) { execution = ExecutionMode.MAP; @@ -289,7 +288,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } }; - public static ExecutionMode fromString(String value, ParseFieldMatcher parseFieldMatcher) { + public static ExecutionMode fromString(String value) { for (ExecutionMode mode : values()) { if (mode.parseField.match(value)) { return mode; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 5a512eaeb4d..6f22fad4a5d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -104,7 +103,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory searchExtBuilders = new HashMap<>(); public TestSearchContext(ThreadPool threadPool, BigArrays bigArrays, IndexService indexService) { 
- super(ParseFieldMatcher.STRICT); this.bigArrays = bigArrays.withCircuitBreaking(); this.indexService = indexService; this.indexFieldDataService = indexService.fieldData(); @@ -102,7 +100,6 @@ public class TestSearchContext extends SearchContext { } public TestSearchContext(QueryShardContext queryShardContext) { - super(ParseFieldMatcher.STRICT); this.bigArrays = null; this.indexService = null; this.indexFieldDataService = null; From 8b8ff8b9e2e48e1e972d6d070c1fd44a13b92a25 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 12:25:18 +0100 Subject: [PATCH 074/119] Remove ParseFieldMatcher usages from SearchService --- .../src/main/java/org/elasticsearch/search/SearchService.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 1a833da65fb..e228c055430 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; @@ -147,12 +146,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final ConcurrentMapLong activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - private final ParseFieldMatcher parseFieldMatcher; - public SearchService(ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase) { super(clusterService.getSettings()); - this.parseFieldMatcher = new ParseFieldMatcher(settings); this.threadPool = threadPool; this.clusterService = clusterService; this.indicesService = indicesService; From ee4dde46d33654adb3dc662fd6d55977992b4582 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 31 Dec 2016 12:27:17 +0100 Subject: [PATCH 075/119] Remove ParseFieldMatcher usages from Aggregator --- .../java/org/elasticsearch/search/aggregations/Aggregator.java | 3 +-- .../aggregations/bucket/terms/TermsAggregationBuilder.java | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index 7c88f6373d8..44adb33c01b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -129,7 +128,7 @@ public abstract class Aggregator extends BucketCollector implements Releasable { return parseField; } - public static SubAggCollectionMode parse(String value, ParseFieldMatcher parseFieldMatcher) { + public static SubAggCollectionMode parse(String value) { 
SubAggCollectionMode[] modes = SubAggCollectionMode.values(); for (SubAggCollectionMode mode : modes) { if (mode.parseField.match(value)) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 857b3350c85..95bac1fd890 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -82,7 +82,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder SubAggCollectionMode.parse(p.text(), c.getParseFieldMatcher()), + (p, c) -> SubAggCollectionMode.parse(p.text()), SubAggCollectionMode.KEY, ObjectParser.ValueType.STRING); PARSER.declareObjectArray(TermsAggregationBuilder::order, TermsAggregationBuilder::parseOrderParam, From 6f242920d3590ab36e08918c6a5a69ad6a689d9c Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Tue, 3 Jan 2017 11:39:40 -0500 Subject: [PATCH 076/119] [TEST] only check node decisions if not in the AWAITING_INFO state --- .../ClusterAllocationExplainIT.java | 31 ++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 74a710aed2a..6574d10b51a 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -122,21 +122,24 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { assertNull(allocateDecision.getTargetNode()); assertEquals(0L, allocateDecision.getConfiguredDelayInMillis()); assertEquals(0L, allocateDecision.getRemainingDelayInMillis()); - assertEquals(0, allocateDecision.getNodeDecisions().size()); - // verify JSON output - try (XContentParser parser = getParser(explanation)) { - verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED); - parser.nextToken(); - assertEquals("can_allocate", parser.currentName()); - parser.nextToken(); - assertEquals(AllocationDecision.NO_VALID_SHARD_COPY.toString(), parser.text()); - parser.nextToken(); - assertEquals("allocate_explanation", parser.currentName()); - parser.nextToken(); - assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be found " + - "on the nodes in the cluster", parser.text()); - assertEquals(Token.END_OBJECT, parser.nextToken()); + if (allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY) { + assertEquals(0, allocateDecision.getNodeDecisions().size()); + + // verify JSON output + try (XContentParser parser = getParser(explanation)) { + verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED); + parser.nextToken(); + assertEquals("can_allocate", parser.currentName()); + parser.nextToken(); + assertEquals(AllocationDecision.NO_VALID_SHARD_COPY.toString(), parser.text()); + parser.nextToken(); + assertEquals("allocate_explanation", parser.currentName()); + parser.nextToken(); + assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be found " + + "on the nodes in the cluster", parser.text()); + 
assertEquals(Token.END_OBJECT, parser.nextToken()); + } } } From 64888ab1d31c25ac518b1341cf23b1fd1d8d4fbc Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Jan 2017 11:43:04 -0500 Subject: [PATCH 077/119] Cleanup comments in SequenceNumbersService.java This commit cleans up the comments in SequenceNumbersService, making them uniform in their formatting and taking advantage of the line-length limit of 140 characters. --- .../index/seqno/SequenceNumbersService.java | 85 ++++++++++++------- 1 file changed, 55 insertions(+), 30 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index 09ed72fd92a..bdd4f1e141b 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.index.seqno; import org.elasticsearch.index.IndexSettings; @@ -25,10 +26,13 @@ import org.elasticsearch.index.shard.ShardId; import java.util.Set; /** - * a very light weight implementation. will be replaced with proper machinery later + * Encapsulates the local and global checkpoints into a single service for use as a shard component. */ public class SequenceNumbersService extends AbstractIndexShardComponent { + /** + * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). + */ public static final long UNASSIGNED_SEQ_NO = -2L; /** @@ -36,8 +40,8 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { */ public static final long NO_OPS_PERFORMED = -1L; - final LocalCheckpointService localCheckpointService; - final GlobalCheckpointService globalCheckpointService; + private final LocalCheckpointService localCheckpointService; + private final GlobalCheckpointService globalCheckpointService; /** * Initialize the sequence number service. The {@code maxSeqNo} should be set to the last sequence number assigned by this shard, or @@ -45,11 +49,11 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { * shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED}, and {@code globalCheckpoint} should be set to the last known global * checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. 
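The issue/complete contract spelled out in the cleaned-up comments below can be summarized in a small hedged sketch (the seqNoService reference and the indexing step are hypothetical):

[source,java]
--------------------------------------------------
// Every issued sequence number must be marked as completed, on success or on
// failure; otherwise the local checkpoint cannot advance past it.
final long seqNo = seqNoService.generateSeqNo();
try {
    // ... perform the operation that was assigned seqNo ...
} finally {
    seqNoService.markSeqNoAsCompleted(seqNo);
}
--------------------------------------------------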
* - * @param shardId the shard this service is providing tracking local checkpoints for - * @param indexSettings the index settings - * @param maxSeqNo the last sequence number assigned by this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} - * @param localCheckpoint the last known local checkpoint for this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} - * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + * @param shardId the shard this service is tracking local checkpoints for + * @param indexSettings the index settings + * @param maxSeqNo the last sequence number assigned by this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} + * @param localCheckpoint the last known local checkpoint for this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} + * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} */ public SequenceNumbersService( final ShardId shardId, @@ -63,58 +67,77 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { } /** - * generates a new sequence number. - * Note: you must call {@link #markSeqNoAsCompleted(long)} after the operation for which this seq# was generated - * was completed (whether successfully or with a failure) + * Issue the next sequence number. Note that you must call {@link #markSeqNoAsCompleted(long)} after the operation for which the + * issued sequence number completes (whether or not the operation completes successfully). + * + * @return the next assigned sequence number */ public long generateSeqNo() { return localCheckpointService.generateSeqNo(); } /** - * Gets the maximum sequence number seen so far. See {@link LocalCheckpointService#getMaxSeqNo()} for details. + * The maximum sequence number issued so far. See {@link LocalCheckpointService#getMaxSeqNo()} for additional details. + * + * @return the maximum sequence number */ public long getMaxSeqNo() { return localCheckpointService.getMaxSeqNo(); } /** - * marks the given seqNo as completed. See {@link LocalCheckpointService#markSeqNoAsCompleted(long)} - * more details + * Marks the processing of the provided sequence number as completed and updates the checkpoint if possible. + * See {@link LocalCheckpointService#markSeqNoAsCompleted(long)} for additional details. + * + * @param seqNo the sequence number to mark as completed */ - public void markSeqNoAsCompleted(long seqNo) { + public void markSeqNoAsCompleted(final long seqNo) { localCheckpointService.markSeqNoAsCompleted(seqNo); } /** - * Gets sequence number related stats + * The current sequence number stats. + * + * @return stats encapsulating the maximum sequence number, the local checkpoint and the global checkpoint */ public SeqNoStats stats() { return new SeqNoStats(getMaxSeqNo(), getLocalCheckpoint(), getGlobalCheckpoint()); } /** - * notifies the service of a local checkpoint. - * see {@link GlobalCheckpointService#updateLocalCheckpoint(String, long)} for details. + * Notifies the service to update the local checkpoint for the shard with the provided allocation ID.
+ * + * @param allocationId the allocation ID of the shard to update the local checkpoint for + * @param checkpoint the local checkpoint for the shard */ - public void updateLocalCheckpointForShard(String allocationId, long checkpoint) { + public void updateLocalCheckpointForShard(final String allocationId, final long checkpoint) { globalCheckpointService.updateLocalCheckpoint(allocationId, checkpoint); } /** - * marks the allocationId as "in sync" with the primary shard. - * see {@link GlobalCheckpointService#markAllocationIdAsInSync(String)} for details. + * Marks the shard with the provided allocation ID as in-sync with the primary shard. See + * {@link GlobalCheckpointService#markAllocationIdAsInSync(String)} for additional details. * - * @param allocationId allocationId of the recovering shard + * @param allocationId the allocation ID of the shard to mark as in-sync */ - public void markAllocationIdAsInSync(String allocationId) { + public void markAllocationIdAsInSync(final String allocationId) { globalCheckpointService.markAllocationIdAsInSync(allocationId); } + /** + * Returns the local checkpoint for the shard. + * + * @return the local checkpoint + */ public long getLocalCheckpoint() { return localCheckpointService.getCheckpoint(); } + /** + * Returns the global checkpoint for the shard. + * + * @return the global checkpoint + */ public long getGlobalCheckpoint() { return globalCheckpointService.getCheckpoint(); } @@ -122,28 +145,30 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { /** * Scans through the currently known local checkpoint and updates the global checkpoint accordingly. * - * @return true if the checkpoint has been updated or if it can not be updated since one of the local checkpoints - * of one of the active allocations is not known. + * @return {@code true} if the checkpoint has been updated or if it can not be updated since one of the local checkpoints of one of the + * active allocations is not known. */ public boolean updateGlobalCheckpointOnPrimary() { return globalCheckpointService.updateCheckpointOnPrimary(); } /** - * updates the global checkpoint on a replica shard (after it has been updated by the primary). + * Updates the global checkpoint on a replica shard after it has been updated by the primary. + * + * @param checkpoint the global checkpoint */ - public void updateGlobalCheckpointOnReplica(long checkpoint) { + public void updateGlobalCheckpointOnReplica(final long checkpoint) { globalCheckpointService.updateCheckpointOnReplica(checkpoint); } /** - * Notifies the service of the current allocation ids in the cluster state. - * see {@link GlobalCheckpointService#updateAllocationIdsFromMaster(Set, Set)} for details. + * Notifies the service of the current allocation IDs in the cluster state. See + * {@link GlobalCheckpointService#updateAllocationIdsFromMaster(Set, Set)} for details. 
* * @param activeAllocationIds the allocation ids of the currently active shard copies * @param initializingAllocationIds the allocation ids of the currently initializing shard copies */ - public void updateAllocationIdsFromMaster(Set activeAllocationIds, Set initializingAllocationIds) { + public void updateAllocationIdsFromMaster(final Set activeAllocationIds, final Set initializingAllocationIds) { globalCheckpointService.updateAllocationIdsFromMaster(activeAllocationIds, initializingAllocationIds); } From 9a65d2008eefcb59387b8b44f0c46d8a668d8b87 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Jan 2017 12:23:33 -0500 Subject: [PATCH 078/119] Cleanup comments in GlobalCheckpointService.java This commit cleans up the comments in GlobalCheckpointService, making them uniform in their formatting and taking advantage of the line-length limit of 140 characters. --- .../index/seqno/GlobalCheckpointService.java | 139 ++++++++++-------- .../index/seqno/GlobalCheckpointTests.java | 2 +- 2 files changed, 75 insertions(+), 66 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java index e8aa0cdeb89..68eaf86f4f8 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.index.seqno; import com.carrotsearch.hppc.ObjectLongHashMap; @@ -31,95 +32,94 @@ import java.util.Set; import static org.elasticsearch.index.seqno.SequenceNumbersService.UNASSIGNED_SEQ_NO; /** - * A shard component that is responsible of tracking the global checkpoint. The global checkpoint - * is the highest seq_no for which all lower (or equal) seq_no have been processed on all shards that - * are currently active. Since shards count as "active" when the master starts them, and before this primary shard - * has been notified of this fact, we also include shards that have completed recovery. These shards have received - * all old operations via the recovery mechanism and are kept up to date by the various replications actions. The set - * of shards that are taken into account for the global checkpoint calculation are called the "in sync" shards. - * + * A shard component that is responsible for tracking the global checkpoint. The global checkpoint is the highest sequence number for which + * all lower (or equal) sequence numbers have been processed on all shards that are currently active. Since shards count as "active" when the + * master starts them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. + * These shards have received all old operations via the recovery mechanism and are kept up to date by the various replication actions. + * The set of shards that are taken into account for the global checkpoint calculation are called the "in-sync shards". *
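To make the definition in the rewritten javadoc concrete, a hedged sketch of how the in-sync local checkpoints bound the global checkpoint (allocation IDs and values are invented, and the package-private methods are used loosely for illustration):

[source,java]
--------------------------------------------------
// Two in-sync copies report local checkpoints 10 and 7; the global checkpoint
// can only advance to the minimum of the in-sync local checkpoints, i.e. 7.
globalCheckpoints.updateAllocationIdsFromMaster(
        new HashSet<>(Arrays.asList("p", "r")), Collections.emptySet());
globalCheckpoints.updateLocalCheckpoint("p", 10);
globalCheckpoints.updateLocalCheckpoint("r", 7);
globalCheckpoints.updateCheckpointOnPrimary();
assert globalCheckpoints.getCheckpoint() == 7;
--------------------------------------------------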

- * The global checkpoint is maintained by the primary shard and is replicated to all the replicas - * (via {@link GlobalCheckpointSyncAction}). + * The global checkpoint is maintained by the primary shard and is replicated to all the replicas (via {@link GlobalCheckpointSyncAction}). */ public class GlobalCheckpointService extends AbstractIndexShardComponent { - - /** - * This map holds the last known local checkpoint for every active shard and initializing shard copies that has been brought up - * to speed through recovery. These shards are treated as valid copies and participate in determining the global - * checkpoint. - *

- * Keyed by allocation ids. + /* + * This map holds the last known local checkpoint for every active shard and initializing shard copies that have been brought up to speed + * through recovery. These shards are treated as valid copies and participate in determining the global checkpoint. This map is keyed by + * allocation IDs. All accesses to this map are guarded by a lock on this. */ - private final ObjectLongMap inSyncLocalCheckpoints; // keyed by allocation ids + private final ObjectLongMap inSyncLocalCheckpoints; - /** - * This set holds the last set of known valid allocation ids as received by the master. This is important to make sure - * shard that are failed or relocated are cleaned up from {@link #inSyncLocalCheckpoints} and do not hold the global - * checkpoint back + /* + * This set holds the last set of known valid allocation ids as received by the master. This is important to make sure shards that are + * failed or relocated are cleaned up from {@link #inSyncLocalCheckpoints} and do not hold the global checkpoint back. All accesses to + * this set are guarded by a lock on this. */ private final Set assignedAllocationIds; + /* + * The current global checkpoint for this shard. Note that this field is guarded by a lock on this and thus this field does not need to + * be volatile. */ + private long globalCheckpoint; /** - * Initialize the global checkpoint service. The {@code globalCheckpoint} - * should be set to the last known global checkpoint for this shard, or - * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. + * Initialize the global checkpoint service. The specified global checkpoint should be set to the last known global checkpoint for this + * shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. * - * @param shardId the shard this service is providing tracking - * local checkpoints for + * @param shardId the shard this service is tracking local checkpoints for * @param indexSettings the index settings - * @param globalCheckpoint the last known global checkpoint for this shard, - * or - * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} */ GlobalCheckpointService(final ShardId shardId, final IndexSettings indexSettings, final long globalCheckpoint) { super(shardId, indexSettings); - assert globalCheckpoint >= UNASSIGNED_SEQ_NO : "illegal initial global checkpoint:" + globalCheckpoint; + assert globalCheckpoint >= UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; inSyncLocalCheckpoints = new ObjectLongHashMap<>(1 + indexSettings.getNumberOfReplicas()); assignedAllocationIds = new HashSet<>(1 + indexSettings.getNumberOfReplicas()); this.globalCheckpoint = globalCheckpoint; } /** - * Notifies the service of a local checkpoint. If the checkpoint is lower than the currently known one, - * this is a noop. Last, if the allocation id is not in sync, it is ignored. This to prevent late - * arrivals from shards that are removed to be re-added. + * Notifies the service to update the local checkpoint for the shard with the provided allocation ID. If the checkpoint is lower than + * the currently known one, this is a no-op. If the allocation ID is not in sync, it is ignored. This is to prevent late arrivals from + * shards that are removed to be re-added.
+ * + * @param allocationId the allocation ID of the shard to update the local checkpoint for + * @param checkpoint the local checkpoint for the shard */ - public synchronized void updateLocalCheckpoint(String allocationId, long localCheckpoint) { + public synchronized void updateLocalCheckpoint(final String allocationId, final long checkpoint) { final int indexOfKey = inSyncLocalCheckpoints.indexOf(allocationId); if (indexOfKey >= 0) { final long current = inSyncLocalCheckpoints.indexGet(indexOfKey); - - if (current < localCheckpoint) { - inSyncLocalCheckpoints.indexReplace(indexOfKey, localCheckpoint); + if (current < checkpoint) { + inSyncLocalCheckpoints.indexReplace(indexOfKey, checkpoint); if (logger.isTraceEnabled()) { - logger.trace("updated local checkpoint of [{}] to [{}] (was [{}])", allocationId, localCheckpoint, current); + logger.trace("updated local checkpoint of [{}] to [{}] (was [{}])", allocationId, checkpoint, current); } } else { - logger.trace("skipping update of local checkpoint [{}], current checkpoint is higher " + - "(current [{}], incoming [{}], type [{}])", - allocationId, current, localCheckpoint, allocationId); + logger.trace( + "skipping update of local checkpoint [{}], current checkpoint is higher (current [{}], incoming [{}], type [{}])", + allocationId, + current, + checkpoint, + allocationId); } } else { - logger.trace("[{}] isn't marked as in sync. ignoring local checkpoint of [{}].", allocationId, localCheckpoint); + logger.trace("[{}] isn't marked as in sync. ignoring local checkpoint of [{}].", allocationId, checkpoint); } } /** - * Scans through the currently known local checkpoints and updates the global checkpoint accordingly. + * Scans through the currently known local checkpoint and updates the global checkpoint accordingly. * - * @return true if the checkpoint has been updated or if it can not be updated since one of the local checkpoints - * of one of the active allocations is not known. + * @return {@code true} if the checkpoint has been updated or if it can not be updated since one of the local checkpoints of one of the + * active allocations is not known. */ synchronized boolean updateCheckpointOnPrimary() { long minCheckpoint = Long.MAX_VALUE; if (inSyncLocalCheckpoints.isEmpty()) { return false; } - for (ObjectLongCursor cp : inSyncLocalCheckpoints) { + for (final ObjectLongCursor cp : inSyncLocalCheckpoints) { if (cp.value == UNASSIGNED_SEQ_NO) { logger.trace("unknown local checkpoint for active allocationId [{}], requesting a sync", cp.key); return true; @@ -139,36 +139,39 @@ public class GlobalCheckpointService extends AbstractIndexShardComponent { } /** - * gets the current global checkpoint. See java docs for {@link GlobalCheckpointService} for more details + * Returns the global checkpoint for the shard. + * + * @return the global checkpoint */ public synchronized long getCheckpoint() { return globalCheckpoint; } /** - * updates the global checkpoint on a replica shard (after it has been updated by the primary). + * Updates the global checkpoint on a replica shard after it has been updated by the primary. + * + * @param checkpoint the global checkpoint */ - synchronized void updateCheckpointOnReplica(long globalCheckpoint) { + synchronized void updateCheckpointOnReplica(final long checkpoint) { /* * The global checkpoint here is a local knowledge which is updated under the mandate of the primary. 
It can happen that the primary * information is lagging compared to a replica (e.g., if a replica is promoted to primary but has stale info relative to other * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than sync from the lagging primary. */ - if (this.globalCheckpoint <= globalCheckpoint) { - this.globalCheckpoint = globalCheckpoint; - logger.trace("global checkpoint updated from primary to [{}]", globalCheckpoint); + if (this.globalCheckpoint <= checkpoint) { + this.globalCheckpoint = checkpoint; + logger.trace("global checkpoint updated from primary to [{}]", checkpoint); } } /** - * Notifies the service of the current allocation ids in the cluster state. This method trims any shards that - * have been removed. + * Notifies the service of the current allocation ids in the cluster state. This method trims any shards that have been removed. * - * @param activeAllocationIds the allocation ids of the currently active shard copies - * @param initializingAllocationIds the allocation ids of the currently initializing shard copies + * @param activeAllocationIds the allocation IDs of the currently active shard copies + * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies */ - public synchronized void updateAllocationIdsFromMaster(Set activeAllocationIds, - Set initializingAllocationIds) { + public synchronized void updateAllocationIdsFromMaster(final Set activeAllocationIds, + final Set initializingAllocationIds) { assignedAllocationIds.removeIf( aId -> activeAllocationIds.contains(aId) == false && initializingAllocationIds.contains(aId) == false); assignedAllocationIds.addAll(activeAllocationIds); @@ -182,22 +185,28 @@ public class GlobalCheckpointService extends AbstractIndexShardComponent { } /** - * marks the allocationId as "in sync" with the primary shard. This should be called at the end of recovery - * where the primary knows all operation below the global checkpoint have been completed on this shard. + * Marks the shard with the provided allocation ID as in-sync with the primary shard. This should be called at the end of recovery where + * the primary knows all operations below the global checkpoint have been completed on this shard. * - * @param allocationId allocationId of the recovering shard + * @param allocationId the allocation ID of the shard to mark as in-sync */ - public synchronized void markAllocationIdAsInSync(String allocationId) { + public synchronized void markAllocationIdAsInSync(final String allocationId) { if (assignedAllocationIds.contains(allocationId) == false) { - // master have change it's mind and removed this allocation, ignore. + // master has removed this allocation, ignore return; } logger.trace("marked [{}] as in sync", allocationId); inSyncLocalCheckpoints.put(allocationId, UNASSIGNED_SEQ_NO); } - // for testing - synchronized long getLocalCheckpointForAllocation(String allocationId) { + /** + * Returns the local checkpoint for the shard with the specified allocation ID, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if + * the shard is not in-sync. 
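The replica-side update in the hunk above is monotonic; a hedged sketch (values invented):

[source,java]
--------------------------------------------------
// The replica keeps the higher of its local knowledge and the primary's
// broadcast, so a stale value from a lagging primary is simply ignored.
globalCheckpoints.updateCheckpointOnReplica(5);
globalCheckpoints.updateCheckpointOnReplica(3); // no-op: 5 is already known
assert globalCheckpoints.getCheckpoint() == 5;
--------------------------------------------------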
+ * + * @param allocationId the allocation ID of the shard to obtain the local checkpoint for + * @return the local checkpoint, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + */ + synchronized long getLocalCheckpointForAllocationId(final String allocationId) { if (inSyncLocalCheckpoints.containsKey(allocationId)) { return inSyncLocalCheckpoints.get(allocationId); } diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTests.java b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTests.java index f27dfd189be..8d8be2e402d 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTests.java @@ -116,7 +116,7 @@ public class GlobalCheckpointTests extends ESTestCase { // first check that adding it without the master blessing doesn't change anything. checkpointService.updateLocalCheckpoint(extraId, maxLocalCheckpoint + 1 + randomInt(4)); - assertThat(checkpointService.getLocalCheckpointForAllocation(extraId), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(checkpointService.getLocalCheckpointForAllocationId(extraId), equalTo(UNASSIGNED_SEQ_NO)); Set newActive = new HashSet<>(active); newActive.add(extraId); From 1286d1ac8358e2acc70ea7307eea7090afbbfa15 Mon Sep 17 00:00:00 2001 From: Mike Dias Date: Tue, 3 Jan 2017 15:28:29 -0200 Subject: [PATCH 079/119] Fix listing format in reindex docs (#22420) --- docs/reference/docs/reindex.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index a1f868358eb..0e95c951cb3 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -891,6 +891,7 @@ Which results in a sensible `total` like this one: Adding `slices` to `_reindex` just automates the manual process used in the section above, creating sub-requests which means it has some quirks: + * You can see these requests in the <>. These sub-requests are "child" tasks of the task for the request with `slices`. * Fetching the status of the task for the request with `slices` only contains From c6ddff757e0f044d8b0cf503edd7a4f7d84c9d5a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Jan 2017 15:07:53 -0500 Subject: [PATCH 080/119] Cleanup some comments in IndexShard.java This commit cleans up the comments in IndexShard related to sequence numbers, making them uniform in their formatting and taking advantage of the line-length limit of 140 characters. --- .../index/seqno/SequenceNumbersService.java | 7 +-- .../elasticsearch/index/shard/IndexShard.java | 52 ++++++++++++------- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index bdd4f1e141b..9fed5f9cf7e 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -105,7 +105,8 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { } /** - * Notifies the service to update the local checkpoint for the shard with the provided allocation ID. + * Notifies the service to update the local checkpoint for the shard with the provided allocation ID. See + * {@link GlobalCheckpointService#updateLocalCheckpoint(String, long)} for details. 
* * @param allocationId the allocation ID of the shard to update the local checkpoint for * @param checkpoint the local checkpoint for the shard @@ -165,8 +166,8 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { * Notifies the service of the current allocation IDs in the cluster state. See * {@link GlobalCheckpointService#updateAllocationIdsFromMaster(Set, Set)} for details. * - * @param activeAllocationIds the allocation ids of the currently active shard copies - * @param initializingAllocationIds the allocation ids of the currently initializing shard copies + * @param activeAllocationIds the allocation IDs of the currently active shard copies + * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies */ public void updateAllocationIdsFromMaster(final Set activeAllocationIds, final Set initializingAllocationIds) { globalCheckpointService.updateAllocationIdsFromMaster(activeAllocationIds, initializingAllocationIds); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index b9eb50545da..b7a1fdd2ca2 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1364,36 +1364,49 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * notifies the service of a local checkpoint. see {@link GlobalCheckpointService#updateLocalCheckpoint(String, long)} for details. + * Notifies the service to update the local checkpoint for the shard with the provided allocation ID. See + * {@link GlobalCheckpointService#updateLocalCheckpoint(String, long)} for details. + * + * @param allocationId the allocation ID of the shard to update the local checkpoint for + * @param checkpoint the local checkpoint for the shard */ - public void updateLocalCheckpointForShard(String allocationId, long checkpoint) { + public void updateLocalCheckpointForShard(final String allocationId, final long checkpoint) { verifyPrimary(); getEngine().seqNoService().updateLocalCheckpointForShard(allocationId, checkpoint); } /** - * marks the allocationId as "in sync" with the primary shard. see {@link GlobalCheckpointService#markAllocationIdAsInSync(String)} - * for details. + * Marks the shard with the provided allocation ID as in-sync with the primary shard. See + * {@link GlobalCheckpointService#markAllocationIdAsInSync(String)} for additional details. * - * @param allocationId allocationId of the recovering shard + * @param allocationId the allocation ID of the shard to mark as in-sync */ - public void markAllocationIdAsInSync(String allocationId) { + public void markAllocationIdAsInSync(final String allocationId) { verifyPrimary(); getEngine().seqNoService().markAllocationIdAsInSync(allocationId); } + /** + * Returns the local checkpoint for the shard. + * + * @return the local checkpoint + */ public long getLocalCheckpoint() { return getEngine().seqNoService().getLocalCheckpoint(); } + /** + * Returns the global checkpoint for the shard. + * + * @return the global checkpoint + */ public long getGlobalCheckpoint() { return getEngine().seqNoService().getGlobalCheckpoint(); } /** - * Checks whether the global checkpoint can be updated based on current knowledge of local checkpoints on the different - * shard copies. The checkpoint is updated or more information is required from the replica, a global checkpoint sync - * is initiated. 
+ * Checks whether the global checkpoint can be updated based on current knowledge of local checkpoints on the different shard copies. + * The checkpoint is updated or, if more information is required from the replica, a global checkpoint sync is initiated. */ public void updateGlobalCheckpointOnPrimary() { verifyPrimary(); @@ -1403,24 +1416,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * updates the global checkpoint on a replica shard (after it has been updated by the primary). + * Updates the global checkpoint on a replica shard after it has been updated by the primary. + * + * @param checkpoint the global checkpoint */ - public void updateGlobalCheckpointOnReplica(long checkpoint) { + public void updateGlobalCheckpointOnReplica(final long checkpoint) { verifyReplicationTarget(); getEngine().seqNoService().updateGlobalCheckpointOnReplica(checkpoint); } /** - * Notifies the service of the current allocation ids in the cluster state. - * see {@link GlobalCheckpointService#updateAllocationIdsFromMaster(Set, Set)} for details. + * Notifies the service of the current allocation IDs in the cluster state. See + * {@link GlobalCheckpointService#updateAllocationIdsFromMaster(Set, Set)} for details. * - * @param activeAllocationIds the allocation ids of the currently active shard copies - * @param initializingAllocationIds the allocation ids of the currently initializing shard copies + * @param activeAllocationIds the allocation IDs of the currently active shard copies + * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies */ - public void updateAllocationIdsFromMaster(Set activeAllocationIds, Set initializingAllocationIds) { + public void updateAllocationIdsFromMaster(final Set activeAllocationIds, final Set initializingAllocationIds) { verifyPrimary(); - Engine engine = getEngineOrNull(); - // if engine is not yet started, we are not ready yet and can just ignore this + final Engine engine = getEngineOrNull(); + // if the engine is not yet started, we are not ready yet and can just ignore this if (engine != null) { engine.seqNoService().updateAllocationIdsFromMaster(activeAllocationIds, initializingAllocationIds); } @@ -1435,7 +1450,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl internalIndexingStats.noopUpdate(type); } - private void checkIndex() throws IOException { if (store.tryIncRef()) { try { From c6573e6e560c91db6845393d1afbdb01952483cd Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 3 Jan 2017 23:38:35 +0100 Subject: [PATCH 081/119] Filter actions to trace in test Notifications for request tracing are invoked concurrently and can still be in flight once a tracer is installed in the test. This can lead to side effects since the test relied on exact invocations. This commit adds action filtering to the test tracer so that it only counts invocations for the relevant actions.
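The gist of the fix, as a minimal standalone sketch: count an event only when its action is in an allow-list, so concurrently delivered notifications for unrelated actions (such as connection handshakes) never touch the latch the test waits on. The class and method names below are illustrative stand-ins, not the actual MockTransportService.Tracer API:

    import java.util.Set;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicReference;

    // Illustrative only: counts events solely for a fixed set of actions.
    class ActionFilteringTracer {
        private final Set<String> actions;
        private final AtomicReference<CountDownLatch> expectedEvents = new AtomicReference<>();

        ActionFilteringTracer(Set<String> actions) {
            this.actions = actions;
        }

        // arm the tracer to wait for an exact number of relevant events
        void expect(int count) {
            expectedEvents.set(new CountDownLatch(count));
        }

        void onEvent(String action) {
            if (actions.contains(action)) { // ignore events from unrelated actions
                expectedEvents.get().countDown();
            }
        }
    }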
Closes #22418 --- .../AbstractSimpleTransportTestCase.java | 48 ++++++++++++------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 8852b386ffd..6c407c88874 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; @@ -32,7 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -44,7 +42,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -58,8 +55,10 @@ import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -836,7 +835,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { assertTrue(inFlight.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); } - @TestLogging("org.elasticsearch.transport:DEBUG") public void testTracerLog() throws InterruptedException { TransportRequestHandler handler = (request, channel) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = new TransportRequestHandler() { @@ -879,7 +877,10 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.registerRequestHandler("test", StringMessageRequest::new, ThreadPool.Names.SAME, handler); serviceB.registerRequestHandler("testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError); - final Tracer tracer = new Tracer(); + final Tracer tracer = new Tracer(new HashSet<>(Arrays.asList("test", "testError"))); + // the tracer is invoked concurrently after the actual action is executed. that means a Tracer#requestSent can still be in-flight + // from a handshake executed on connect in the setup method. this might confuse this test since it expects exact number of + // invocations. To prevent any unrelated events messing with this test we filter on the actions we execute in this test. 
serviceA.addTracer(tracer); serviceB.addTracer(tracer); @@ -943,6 +944,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } private static class Tracer extends MockTransportService.Tracer { + private final Set actions; public volatile boolean sawRequestSent; public volatile boolean sawRequestReceived; public volatile boolean sawResponseSent; @@ -950,42 +952,52 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public volatile boolean sawResponseReceived; public AtomicReference expectedEvents = new AtomicReference<>(); - private final Logger logger = Loggers.getLogger(getClass()); - + public Tracer(Set actions) { + this.actions = actions; + } @Override public void receivedRequest(long requestId, String action) { super.receivedRequest(requestId, action); - sawRequestReceived = true; - expectedEvents.get().countDown(); + if (actions.contains(action)) { + sawRequestReceived = true; + expectedEvents.get().countDown(); + } } @Override public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { super.requestSent(node, requestId, action, options); - sawRequestSent = true; - expectedEvents.get().countDown(); + if (actions.contains(action)) { + sawRequestSent = true; + expectedEvents.get().countDown(); + } } @Override public void responseSent(long requestId, String action) { super.responseSent(requestId, action); - logger.debug("#### responseSent for action: {}", action); - sawResponseSent = true; - expectedEvents.get().countDown(); + if (actions.contains(action)) { + sawResponseSent = true; + expectedEvents.get().countDown(); + } } @Override public void responseSent(long requestId, String action, Throwable t) { super.responseSent(requestId, action, t); - sawErrorSent = true; - expectedEvents.get().countDown(); + if (actions.contains(action)) { + sawErrorSent = true; + expectedEvents.get().countDown(); + } } @Override public void receivedResponse(long requestId, DiscoveryNode sourceNode, String action) { super.receivedResponse(requestId, sourceNode, action); - sawResponseReceived = true; - expectedEvents.get().countDown(); + if (actions.contains(action)) { + sawResponseReceived = true; + expectedEvents.get().countDown(); + } } public void reset(int expectedCount) { From 360ce532eb200a676ce96eaafde3ccb1c80d9cd7 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 4 Jan 2017 10:42:22 +0100 Subject: [PATCH 082/119] Implement stats for geo_point and geo_shape field (#22391) Currently `geo_point` and `geo_shape` fields are treated as `text` fields by the field stats API and we try to extract the min/max values with MultiFields.getTerms. This is ok in master because a `geo_point` field is always a Point field, but it can cause problems in 5.x (and 2.x) because legacy `geo_point` fields are indexed as terms. As a result the min and max are extracted and then printed in the FieldStats output using BytesRef.utf8ToString, which can throw an IndexOutOfBoundsException since the terms are not valid UTF-8 strings. This change ensures that we never try to extract min/max information from a `geo_point` field. It does not add a new type for geo points in the fieldstats API so we'll continue to use `text` for this kind of field. This PR is targeted at master even though we could commit this change only to 5.x. I think it's cleaner to have it in master too before we make any decision on https://github.com/elastic/elasticsearch/pull/21947.
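To make the failure mode concrete, here is a hypothetical snippet (not code from this change; the class name and byte values are invented for illustration) showing why decoding arbitrary term bytes as UTF-8 is unsafe:

    import org.apache.lucene.util.BytesRef;

    public class GeoTermDecodeDemo {
        public static void main(String[] args) {
            // A legacy geo_point term is binary-encoded, not UTF-8. This byte
            // sequence starts a 4-byte UTF-8 character (0xF0) but is truncated,
            // so decoding reads past the end of the array.
            BytesRef term = new BytesRef(new byte[] {(byte) 0xF0, (byte) 0x9D, 0x12});
            // Throws ArrayIndexOutOfBoundsException (an IndexOutOfBoundsException)
            // on this input instead of returning a sensible string.
            System.out.println(term.utf8ToString());
        }
    }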
Fixes #22384 --- .../index/mapper/BaseGeoPointFieldMapper.java | 21 +++- .../index/mapper/GeoShapeFieldMapper.java | 26 ++++- .../test/field_stats/10_basics.yaml | 104 +++++++++++++++++- 3 files changed, 139 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java index c4f9e7a1cb3..c075d784c94 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/BaseGeoPointFieldMapper.java @@ -19,14 +19,15 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; -import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.logging.DeprecationLogger; @@ -170,6 +171,20 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr public Query termQuery(Object value, QueryShardContext context) { throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead: [" + name() + "]"); } + + @Override + public FieldStats stats(IndexReader reader) throws IOException { + int maxDoc = reader.maxDoc(); + FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name()); + if (fi == null) { + return null; + } + /** + * we don't have a specific type for geo_point so we use an empty {@link FieldStats.Text}. 
+ * TODO: we should maybe support a new type that knows how to (de)encode the min/max information + */ + return new FieldStats.Text(maxDoc, -1, -1, -1, isSearchable(), isAggregatable()); + } } protected Explicit ignoreMalformed; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index f584c216709..f1a73308692 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -18,12 +18,11 @@ */ package org.elasticsearch.index.mapper; -import org.apache.lucene.index.IndexableField; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import org.apache.lucene.document.Field; +import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; @@ -33,8 +32,8 @@ import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; import org.elasticsearch.Version; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.common.geo.builders.ShapeBuilder; @@ -45,6 +44,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.Iterator; @@ -415,6 +417,20 @@ public class GeoShapeFieldMapper extends FieldMapper { public Query termQuery(Object value, QueryShardContext context) { throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead"); } + + @Override + public FieldStats stats(IndexReader reader) throws IOException { + int maxDoc = reader.maxDoc(); + FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name()); + if (fi == null) { + return null; + } + /** + * we don't have a specific type for geo_shape so we use an empty {@link FieldStats.Text}. 
+ * TODO: we should maybe support a new type that knows how to (de)encode the min/max information + */ + return new FieldStats.Text(maxDoc, -1, -1, -1, isSearchable(), isAggregatable()); + } } protected Explicit coerce; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml index 35cc19224ec..37aac939f7a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml @@ -12,6 +12,12 @@ setup: type: long bar: type: long + geo: + type: geo_point + geo_shape: + type: geo_shape + tree: quadtree + precision: 1m - do: indices.create: @@ -26,20 +32,26 @@ setup: type: long bar: type: text + geo: + type: geo_point + geo_shape: + type: geo_shape + tree: quadtree + precision: 1m - do: index: index: test_1 type: test id: id_1 - body: { foo: "bar", number: 123, bar: 123 } + body: { foo: "bar", number: 123, bar: 123, geo: { lat: 48.858093, lon: 2.294694} } - do: index: index: test_2 type: test id: id_10 - body: { foo: "babar", number: 456, bar: "123" } + body: { foo: "babar", number: 456, bar: "123", geo_shape: {type: "linestring", coordinates : [[-77.03653, 38.897676], [-77.009051, 38.889939]] } } - do: indices.refresh: {} @@ -48,7 +60,7 @@ setup: "Basic field stats": - do: field_stats: - fields: [foo, number] + fields: [foo, number, geo, geo_shape] - match: { indices._all.fields.foo.max_doc: 2 } - match: { indices._all.fields.foo.doc_count: 2 } @@ -68,13 +80,54 @@ setup: - match: { indices._all.fields.number.max_value: 456 } - match: { indices._all.fields.number.max_value_as_string: "456" } - match: { indices._all.fields.number.type: "integer" } + - match: { indices._all.fields.geo.type: "string" } + - match: { indices._all.fields.geo.max_doc: 1 } + - match: { indices._all.fields.geo.doc_count: -1 } + - match: { indices._all.fields.geo.searchable: true } + - match: { indices._all.fields.geo.aggregatable: true } + - match: { indices._all.fields.geo_shape.type: "string" } + - match: { indices._all.fields.geo_shape.max_doc: 1 } + - match: { indices._all.fields.geo_shape.searchable: true } + - match: { indices._all.fields.geo_shape.aggregatable: false } + - is_false: conflicts +--- +"Geopoint field stats": + - skip: + version: " - 5.2.0" + reason: geo_point fields don't return min/max for versions greater than 5.2.0 + + - do: + field_stats: + fields: [geo, geo_shape] + + - match: { indices._all.fields.geo.type: "string" } + - match: { indices._all.fields.geo.max_doc: 1 } + - match: { indices._all.fields.geo.doc_count: -1 } + - match: { indices._all.fields.geo.searchable: true } + - match: { indices._all.fields.geo.aggregatable: true } + - is_false: indices._all.fields.geo.min_value + - is_false: indices._all.fields.geo.max_value + - is_false: indices._all.fields.geo.min_value_as_string + - is_false: indices._all.fields.geo.max_value_as_string + - match: { indices._all.fields.geo_shape.type: "string" } + - match: { indices._all.fields.geo_shape.max_doc: 1 } + - match: { indices._all.fields.geo_shape.doc_count: -1 } + - match: { indices._all.fields.geo_shape.searchable: true } + - match: { indices._all.fields.geo_shape.aggregatable: false } + - is_false: indices._all.fields.geo_shape.min_value + - is_false: indices._all.fields.geo_shape.max_value + - is_false: indices._all.fields.geo_shape.min_value_as_string + - is_false: indices._all.fields.geo_shape.max_value_as_string + - 
is_false: conflicts + + --- "Basic field stats with level set to indices": - do: field_stats: - fields: [foo, number] + fields: [foo, number, geo, geo_shape] level: indices - match: { indices.test_1.fields.foo.max_doc: 1 } @@ -95,6 +148,10 @@ setup: - match: { indices.test_1.fields.number.max_value: 123 } - match: { indices.test_1.fields.number.max_value_as_string: "123" } - match: { indices.test_1.fields.number.type: "integer" } + - match: { indices.test_1.fields.geo.type: "string" } + - match: { indices.test_1.fields.geo.max_doc: 1 } + - match: { indices.test_1.fields.geo.searchable: true } + - match: { indices.test_1.fields.geo.aggregatable: true } - match: { indices.test_2.fields.foo.max_doc: 1 } - match: { indices.test_2.fields.foo.doc_count: 1 } - match: { indices.test_2.fields.foo.min_value: "babar" } @@ -114,6 +171,45 @@ setup: - match: { indices.test_2.fields.number.max_value: 456 } - match: { indices.test_2.fields.number.max_value_as_string: "456" } - match: { indices.test_2.fields.number.type: "integer" } + - match: { indices.test_2.fields.geo_shape.type: "string" } + - match: { indices.test_2.fields.geo_shape.max_doc: 1 } + - match: { indices.test_2.fields.geo_shape.searchable: true } + - match: { indices.test_2.fields.geo_shape.aggregatable: false } + - is_false: indices.test_2.fields.geo + - is_false: conflicts + + +--- +"Geopoint field stats with level set to indices": + - skip: + version: " - 5.2.0" + reason: geo_point fields don't return min/max for versions greater than 5.2.0 + + - do: + field_stats: + fields: [geo, geo_shape] + level: indices + + - match: { indices.test_1.fields.geo.max_doc: 1 } + - match: { indices.test_1.fields.geo.doc_count: -1 } + - is_false: indices.test_1.fields.geo.min_value + - is_false: indices.test_1.fields.geo.max_value + - is_false: indices.test_1.fields.geo.min_value_as_string + - is_false: indices.test_1.fields.geo.max_value_as_string + - match: { indices.test_1.fields.geo.searchable: true } + - match: { indices.test_1.fields.geo.aggregatable: true } + - match: { indices.test_1.fields.geo.type: "string" } + - is_false: indices.test_2.fields.geo + - match: { indices.test_2.fields.geo_shape.max_doc: 1 } + - match: { indices.test_2.fields.geo_shape.doc_count: -1 } + - is_false: indices.test_2.fields.geo_shape.min_value + - is_false: indices.test_2.fields.geo_shape.max_value + - is_false: indices.test_2.fields.geo_shape.min_value_as_string + - is_false: indices.test_2.fields.geo_shape.max_value_as_string + - match: { indices.test_2.fields.geo_shape.searchable: true } + - match: { indices.test_2.fields.geo_shape.aggregatable: false } + - match: { indices.test_2.fields.geo_shape.type: "string" } + - is_false: indices.test_2.fields.geo - is_false: conflicts --- From 1f35d2532b29269dc2257bf51c6b4fb480e01360 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 4 Jan 2017 13:14:09 +0100 Subject: [PATCH 083/119] Fix BWC layer with field_stats and geo_point --- .../test/field_stats/10_basics.yaml | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml index 37aac939f7a..d0746d816ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_stats/10_basics.yaml @@ -60,7 +60,7 @@ setup: "Basic field stats": - do: field_stats: - fields: [foo, number, geo, geo_shape] + 
fields: [foo, number] - match: { indices._all.fields.foo.max_doc: 2 } - match: { indices._all.fields.foo.doc_count: 2 } @@ -80,15 +80,6 @@ setup: - match: { indices._all.fields.number.max_value: 456 } - match: { indices._all.fields.number.max_value_as_string: "456" } - match: { indices._all.fields.number.type: "integer" } - - match: { indices._all.fields.geo.type: "string" } - - match: { indices._all.fields.geo.max_doc: 1 } - - match: { indices._all.fields.geo.doc_count: -1 } - - match: { indices._all.fields.geo.searchable: true } - - match: { indices._all.fields.geo.aggregatable: true } - - match: { indices._all.fields.geo_shape.type: "string" } - - match: { indices._all.fields.geo_shape.max_doc: 1 } - - match: { indices._all.fields.geo_shape.searchable: true } - - match: { indices._all.fields.geo_shape.aggregatable: false } - is_false: conflicts @@ -127,7 +118,7 @@ setup: "Basic field stats with level set to indices": - do: field_stats: - fields: [foo, number, geo, geo_shape] + fields: [foo, number] level: indices - match: { indices.test_1.fields.foo.max_doc: 1 } @@ -148,10 +139,6 @@ setup: - match: { indices.test_1.fields.number.max_value: 123 } - match: { indices.test_1.fields.number.max_value_as_string: "123" } - match: { indices.test_1.fields.number.type: "integer" } - - match: { indices.test_1.fields.geo.type: "string" } - - match: { indices.test_1.fields.geo.max_doc: 1 } - - match: { indices.test_1.fields.geo.searchable: true } - - match: { indices.test_1.fields.geo.aggregatable: true } - match: { indices.test_2.fields.foo.max_doc: 1 } - match: { indices.test_2.fields.foo.doc_count: 1 } - match: { indices.test_2.fields.foo.min_value: "babar" } @@ -171,11 +158,6 @@ setup: - match: { indices.test_2.fields.number.max_value: 456 } - match: { indices.test_2.fields.number.max_value_as_string: "456" } - match: { indices.test_2.fields.number.type: "integer" } - - match: { indices.test_2.fields.geo_shape.type: "string" } - - match: { indices.test_2.fields.geo_shape.max_doc: 1 } - - match: { indices.test_2.fields.geo_shape.searchable: true } - - match: { indices.test_2.fields.geo_shape.aggregatable: false } - - is_false: indices.test_2.fields.geo - is_false: conflicts From 85b754f0e0552bb7b948387fca4f938cd7af6733 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Wed, 4 Jan 2017 12:07:17 -0500 Subject: [PATCH 084/119] [TEST] 5.x snapshot build is working again, so update the backwards compatibility tests for the allocation explain API to include 5.2.0 --- .../test/cluster.allocation_explain/10_basic.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml index 06f9c9e12c2..9e1a57a4980 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml @@ -1,8 +1,7 @@ --- "cluster shard allocation explanation test": - skip: - # Set to 6.0.0 for now because the 5.2 snapshot builds are failing, causing these tests to run against an old 5.2 build - version: " - 6.0.0" + version: " - 5.1.99" reason: allocation explain api format is different in versions < 5.2.0 - do: @@ -40,8 +39,7 @@ --- "cluster shard allocation explanation test with empty request": - skip: - # Set to 6.0.0 for now because the 5.2 snapshot builds are failing, causing these tests to run 
against an old 5.2 build - version: " - 6.0.0" + version: " - 5.1.99" reason: allocation explain api format is different in versions < 5.2.0 - do: From f8998fece5f492244d2feb1c7d64e93582f67d5c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 4 Jan 2017 19:03:52 +0100 Subject: [PATCH 085/119] Upgrade to lucene-6.4.0-snapshot-084f7a0. (#22413) --- buildSrc/version.properties | 2 +- ...ers-common-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ers-common-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ard-codecs-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ard-codecs-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ucene-core-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ucene-core-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...e-grouping-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...e-grouping-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ighlighter-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ighlighter-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ucene-join-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ucene-join-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ene-memory-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ene-memory-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ucene-misc-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ucene-misc-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ne-queries-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ne-queries-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ueryparser-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ueryparser-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ne-sandbox-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ne-sandbox-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ne-spatial-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ne-spatial-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ial-extras-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ial-extras-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...-spatial3d-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...ne-suggest-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...ne-suggest-6.4.0-snapshot-ec38570.jar.sha1 | 1 - .../GraphTokenStreamFiniteStrings.java | 291 --------- .../analysis/synonym/SynonymGraphFilter.java | 588 ------------------ .../org/apache/lucene/search/GraphQuery.java | 115 ---- .../index/analysis/AnalysisRegistry.java | 4 +- ...va => SynonymGraphTokenFilterFactory.java} | 4 +- .../index/mapper/NumberFieldMapper.java | 86 +-- .../index/search/MatchQuery.java | 146 ----- .../elasticsearch/bootstrap/security.policy | 4 +- .../bootstrap/test-framework.policy | 2 +- .../index/search/MatchQueryIT.java | 7 +- ...xpressions-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...xpressions-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...lyzers-icu-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...lyzers-icu-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...s-kuromoji-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...s-kuromoji-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...s-phonetic-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...s-phonetic-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...rs-smartcn-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...rs-smartcn-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...rs-stempel-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...rs-stempel-6.4.0-snapshot-ec38570.jar.sha1 | 1 - ...morfologik-6.4.0-snapshot-084f7a0.jar.sha1 | 1 + ...morfologik-6.4.0-snapshot-ec38570.jar.sha1 | 1 - .../AnalysisFactoryTestCase.java | 4 + 56 files changed, 44 insertions(+), 1253 deletions(-) create mode 100644 core/licenses/lucene-analyzers-common-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-backward-codecs-6.4.0-snapshot-084f7a0.jar.sha1 delete 
mode 100644 core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-core-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-grouping-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-highlighter-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-join-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-memory-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-misc-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-queries-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-queryparser-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-sandbox-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-spatial-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-spatial-extras-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-spatial3d-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 core/licenses/lucene-suggest-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 delete mode 100644 core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java delete mode 100644 core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java delete mode 100644 core/src/main/java/org/apache/lucene/search/GraphQuery.java rename core/src/main/java/org/elasticsearch/index/analysis/{SynonymGraphFilterFactory.java => SynonymGraphTokenFilterFactory.java} (88%) create mode 100644 modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 
plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-ec38570.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-084f7a0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-ec38570.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 44835f7227c..4fd4f26bd71 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 6.0.0-alpha1 -lucene = 6.4.0-snapshot-ec38570 +lucene = 6.4.0-snapshot-084f7a0 # optional dependencies spatial4j = 0.6 diff --git a/core/licenses/lucene-analyzers-common-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-analyzers-common-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..ffa2b42fb90 --- /dev/null +++ b/core/licenses/lucene-analyzers-common-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +ad1553dd2eed3a7cd5778bc7520821ac926b56df \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 5cab7b2fef1..00000000000 --- a/core/licenses/lucene-analyzers-common-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -770114e0188dd8b4f30e5878b4f6c8677cecf1be \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-backward-codecs-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..58587dc58b8 --- /dev/null +++ b/core/licenses/lucene-backward-codecs-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +dde630b1d09ff928a1f358951747cfad5c46b334 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 02677cb1ff8..00000000000 --- a/core/licenses/lucene-backward-codecs-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4eb0257e8419beaa9f84da6a51375fda4e491f2 \ No newline at end of file diff --git a/core/licenses/lucene-core-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-core-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..66a9a3208e6 --- /dev/null +++ b/core/licenses/lucene-core-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +1789bff323a0c013b126f4e51f1f269ebc631277 \ No newline at end of file diff --git a/core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index ea81fbaeb56..00000000000 --- a/core/licenses/lucene-core-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c80ad16cd36c41012abb8a8bb1c7328c6d680b4a \ No newline at end of file diff --git a/core/licenses/lucene-grouping-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-grouping-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..74441065e0d --- /dev/null +++ b/core/licenses/lucene-grouping-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +8cb17916d0e63705f1f715fe0d03ed32916a077a \ No newline at end of 
file diff --git a/core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index d4442ded938..00000000000 --- a/core/licenses/lucene-grouping-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -070d4e370f4fe0b8a04b2bce5b4381201b0c783f \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-highlighter-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..9aaa848b476 --- /dev/null +++ b/core/licenses/lucene-highlighter-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +79d6ba8fa629a52ad3eb829d085836f5fd2f7a87 \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index e6fc043a287..00000000000 --- a/core/licenses/lucene-highlighter-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -131d9a86f5943675493a85def0e692842f396458 \ No newline at end of file diff --git a/core/licenses/lucene-join-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-join-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..4ea4443a650 --- /dev/null +++ b/core/licenses/lucene-join-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +19794d8f15402c991d9533bfcd67e2e7a34677ef \ No newline at end of file diff --git a/core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 6c90673f498..00000000000 --- a/core/licenses/lucene-join-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -385b2202036b50a764e4d2b032e21496b74a1c8e \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-memory-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..8128c115c13 --- /dev/null +++ b/core/licenses/lucene-memory-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +33e42d3019e072752258bd778912c8d4365470a1 \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index bdb3a168612..00000000000 --- a/core/licenses/lucene-memory-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8742a44ef4849a17d5e59ef36e9a52a8f2370c2 \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-misc-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..d55fa646119 --- /dev/null +++ b/core/licenses/lucene-misc-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +a1b3271b3800da349c8b98f7b1a25b2b6192252a \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index e29fc5f139c..00000000000 --- a/core/licenses/lucene-misc-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ce2e4948fb66393a34f4200a6131cfde43e47bd \ No newline at end of file diff --git a/core/licenses/lucene-queries-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-queries-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..99948c1260d --- /dev/null +++ b/core/licenses/lucene-queries-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +792716d805fcc5091931874c2f2f86f35da8b401 \ No newline at end of file diff 
--git a/core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 4998ff5b2e4..00000000000 --- a/core/licenses/lucene-queries-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c1c385a597ce797b0049d9b2281b09593e1488a \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-queryparser-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..06cade53075 --- /dev/null +++ b/core/licenses/lucene-queryparser-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +c3f8bbc6ebe8d31da41fcdb1fa73f13d8170ee62 \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 9ba51f22f25..00000000000 --- a/core/licenses/lucene-queryparser-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fafaa22906c067e6894f9f2b18ad03ded98e2f38 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-sandbox-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..33dc3fac466 --- /dev/null +++ b/core/licenses/lucene-sandbox-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +263901a19686c6cce7dd5c32a4934c42c62454dc \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index cce2045942b..00000000000 --- a/core/licenses/lucene-sandbox-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19c64a84617f42bb4c11b1e266df4009cd37fdd0 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-spatial-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..8bcd0086722 --- /dev/null +++ b/core/licenses/lucene-spatial-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +85426164fcc264a7e3bacc1a70602513540a261a \ No newline at end of file diff --git a/core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 8169bea2fae..00000000000 --- a/core/licenses/lucene-spatial-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc8613fb61c0ae95dd3680b0f65e3380c3fd0d6c \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-spatial-extras-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..d2041b9a4dd --- /dev/null +++ b/core/licenses/lucene-spatial-extras-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +332cbfaa6b1ee0bf4d820018872988e15cd413d2 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 2614704c057..00000000000 --- a/core/licenses/lucene-spatial-extras-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fa2c3e722294e863f3c70a15e97a18397391fb4 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-spatial3d-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..b699c89a6d3 --- /dev/null +++ b/core/licenses/lucene-spatial3d-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 
@@ +3fe3e902b971f4aa2b4a3a417ba5dcf83e968428 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 9b1c45581a1..00000000000 --- a/core/licenses/lucene-spatial3d-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db74c6313965ffdd10d9b19be2eed4ae2c76d2e3 \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.4.0-snapshot-084f7a0.jar.sha1 b/core/licenses/lucene-suggest-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..69bb10621f1 --- /dev/null +++ b/core/licenses/lucene-suggest-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +c4863fe45853163abfbe5c8b8bd7bdcf9a9c7b40 \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 b/core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 91841f474ef..00000000000 --- a/core/licenses/lucene-suggest-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b85ae1121b5fd56df985615a3cdd7b3879e9b92d \ No newline at end of file diff --git a/core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java b/core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java deleted file mode 100644 index 3d806588eca..00000000000 --- a/core/src/main/java/org/apache/lucene/analysis/synonym/GraphTokenStreamFiniteStrings.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.lucene.analysis.synonym; - -import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; - -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.IntsRef; -import org.apache.lucene.util.automaton.Automaton; -import org.apache.lucene.util.automaton.FiniteStringsIterator; -import org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.Transition; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * Creates a list of {@link TokenStream} where each stream is the tokens that make up a finite string in graph token stream. 
To do this, - * the graph token stream is converted to an {@link Automaton} and from there we use a {@link FiniteStringsIterator} to collect the various - * token streams for each finite string. - */ -public class GraphTokenStreamFiniteStrings { - private final Automaton.Builder builder; - Automaton det; - private final Map termToID = new HashMap<>(); - private final Map idToTerm = new HashMap<>(); - private int anyTermID = -1; - - public GraphTokenStreamFiniteStrings() { - this.builder = new Automaton.Builder(); - } - - private static class BytesRefArrayTokenStream extends TokenStream { - private final BytesTermAttribute termAtt = addAttribute(BytesTermAttribute.class); - private final BytesRef[] terms; - private int offset; - - BytesRefArrayTokenStream(BytesRef[] terms) { - this.terms = terms; - offset = 0; - } - - @Override - public boolean incrementToken() throws IOException { - if (offset < terms.length) { - clearAttributes(); - termAtt.setBytesRef(terms[offset]); - offset = offset + 1; - return true; - } - - return false; - } - } - - /** - * Gets - */ - public List getTokenStreams(final TokenStream in) throws IOException { - // build automation - build(in); - - List tokenStreams = new ArrayList<>(); - final FiniteStringsIterator finiteStrings = new FiniteStringsIterator(det); - for (IntsRef string; (string = finiteStrings.next()) != null; ) { - final BytesRef[] tokens = new BytesRef[string.length]; - for (int idx = string.offset, len = string.offset + string.length; idx < len; idx++) { - tokens[idx - string.offset] = idToTerm.get(string.ints[idx]); - } - - tokenStreams.add(new BytesRefArrayTokenStream(tokens)); - } - - return tokenStreams; - } - - private void build(final TokenStream in) throws IOException { - if (det != null) { - throw new IllegalStateException("Automation already built"); - } - - final TermToBytesRefAttribute termBytesAtt = in.addAttribute(TermToBytesRefAttribute.class); - final PositionIncrementAttribute posIncAtt = in.addAttribute(PositionIncrementAttribute.class); - final PositionLengthAttribute posLengthAtt = in.addAttribute(PositionLengthAttribute.class); - final OffsetAttribute offsetAtt = in.addAttribute(OffsetAttribute.class); - - in.reset(); - - int pos = -1; - int lastPos = 0; - int maxOffset = 0; - int maxPos = -1; - int state = -1; - while (in.incrementToken()) { - int posInc = posIncAtt.getPositionIncrement(); - assert pos > -1 || posInc > 0; - - if (posInc > 1) { - throw new IllegalArgumentException("cannot handle holes; to accept any term, use '*' term"); - } - - if (posInc > 0) { - // New node: - pos += posInc; - } - - int endPos = pos + posLengthAtt.getPositionLength(); - while (state < endPos) { - state = createState(); - } - - BytesRef term = termBytesAtt.getBytesRef(); - //System.out.println(pos + "-" + endPos + ": " + term.utf8ToString() + ": posInc=" + posInc); - if (term.length == 1 && term.bytes[term.offset] == (byte) '*') { - addAnyTransition(pos, endPos); - } else { - addTransition(pos, endPos, term); - } - - maxOffset = Math.max(maxOffset, offsetAtt.endOffset()); - maxPos = Math.max(maxPos, endPos); - } - - in.end(); - - // TODO: look at endOffset? ts2a did... - - // TODO: this (setting "last" state as the only accept state) may be too simplistic? - setAccept(state, true); - finish(); - } - - /** - * Returns a new state; state 0 is always the initial state. - */ - private int createState() { - return builder.createState(); - } - - /** - * Marks the specified state as accept or not. 
- */ - private void setAccept(int state, boolean accept) { - builder.setAccept(state, accept); - } - - /** - * Adds a transition to the automaton. - */ - private void addTransition(int source, int dest, String term) { - addTransition(source, dest, new BytesRef(term)); - } - - /** - * Adds a transition to the automaton. - */ - private void addTransition(int source, int dest, BytesRef term) { - if (term == null) { - throw new NullPointerException("term should not be null"); - } - builder.addTransition(source, dest, getTermID(term)); - } - - /** - * Adds a transition matching any term. - */ - private void addAnyTransition(int source, int dest) { - builder.addTransition(source, dest, getTermID(null)); - } - - /** - * Call this once you are done adding states/transitions. - */ - private void finish() { - finish(DEFAULT_MAX_DETERMINIZED_STATES); - } - - /** - * Call this once you are done adding states/transitions. - * - * @param maxDeterminizedStates Maximum number of states created when determinizing the automaton. Higher numbers allow this operation - * to consume more memory but allow more complex automatons. - */ - private void finish(int maxDeterminizedStates) { - Automaton automaton = builder.finish(); - - // System.out.println("before det:\n" + automaton.toDot()); - - Transition t = new Transition(); - - // TODO: should we add "eps back to initial node" for all states, - // and det that? then we don't need to revisit initial node at - // every position? but automaton could blow up? And, this makes it - // harder to skip useless positions at search time? - - if (anyTermID != -1) { - - // Make sure there are no leading or trailing ANY: - int count = automaton.initTransition(0, t); - for (int i = 0; i < count; i++) { - automaton.getNextTransition(t); - if (anyTermID >= t.min && anyTermID <= t.max) { - throw new IllegalStateException("automaton cannot lead with an ANY transition"); - } - } - - int numStates = automaton.getNumStates(); - for (int i = 0; i < numStates; i++) { - count = automaton.initTransition(i, t); - for (int j = 0; j < count; j++) { - automaton.getNextTransition(t); - if (automaton.isAccept(t.dest) && anyTermID >= t.min && anyTermID <= t.max) { - throw new IllegalStateException("automaton cannot end with an ANY transition"); - } - } - } - - int termCount = termToID.size(); - - // We have to carefully translate these transitions so automaton - // realizes they also match all other terms: - Automaton newAutomaton = new Automaton(); - for (int i = 0; i < numStates; i++) { - newAutomaton.createState(); - newAutomaton.setAccept(i, automaton.isAccept(i)); - } - - for (int i = 0; i < numStates; i++) { - count = automaton.initTransition(i, t); - for (int j = 0; j < count; j++) { - automaton.getNextTransition(t); - int min, max; - if (t.min <= anyTermID && anyTermID <= t.max) { - // Match any term - min = 0; - max = termCount - 1; - } else { - min = t.min; - max = t.max; - } - newAutomaton.addTransition(t.source, t.dest, min, max); - } - } - newAutomaton.finishState(); - automaton = newAutomaton; - } - - det = Operations.removeDeadStates(Operations.determinize(automaton, maxDeterminizedStates)); - } - - private int getTermID(BytesRef term) { - Integer id = termToID.get(term); - if (id == null) { - id = termToID.size(); - if (term != null) { - term = BytesRef.deepCopyOf(term); - } - termToID.put(term, id); - idToTerm.put(id, term); - if (term == null) { - anyTermID = id; - } - } - - return id; - } -} diff --git 
a/core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java b/core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java deleted file mode 100644 index f2c27679ab6..00000000000 --- a/core/src/main/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java +++ /dev/null @@ -1,588 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.lucene.analysis.synonym; - -import org.apache.lucene.analysis.TokenFilter; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.FlagsAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TypeAttribute; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.util.AttributeSource; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CharsRefBuilder; -import org.apache.lucene.util.RollingBuffer; -import org.apache.lucene.util.fst.FST; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; - -// TODO: maybe we should resolve token -> wordID then run -// FST on wordIDs, for better perf? - -// TODO: a more efficient approach would be Aho/Corasick's -// algorithm -// http://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm -// It improves over the current approach here -// because it does not fully re-start matching at every -// token. For example if one pattern is "a b c x" -// and another is "b c d" and the input is "a b c d", on -// trying to parse "a b c x" but failing when you got to x, -// rather than starting over again your really should -// immediately recognize that "b c d" matches at the next -// input. I suspect this won't matter that much in -// practice, but it's possible on some set of synonyms it -// will. We'd have to modify Aho/Corasick to enforce our -// conflict resolving (eg greedy matching) because that algo -// finds all matches. This really amounts to adding a .* -// closure to the FST and then determinizing it. -// -// Another possible solution is described at http://www.cis.uni-muenchen.de/people/Schulz/Pub/dictle5.ps - -/** - * Applies single- or multi-token synonyms from a {@link SynonymMap} - * to an incoming {@link TokenStream}, producing a fully correct graph - * output. This is a replacement for {@link SynonymFilter}, which produces - * incorrect graphs for multi-token synonyms. - * - * NOTE: this cannot consume an incoming graph; results will - * be undefined. 
- */ -public final class SynonymGraphFilter extends TokenFilter { - - public static final String TYPE_SYNONYM = "SYNONYM"; - public static final int GRAPH_FLAG = 8; - - private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); - private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class); - private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class); - - private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); - private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); - - private final SynonymMap synonyms; - private final boolean ignoreCase; - - private final FST fst; - - private final FST.BytesReader fstReader; - private final FST.Arc scratchArc; - private final ByteArrayDataInput bytesReader = new ByteArrayDataInput(); - private final BytesRef scratchBytes = new BytesRef(); - private final CharsRefBuilder scratchChars = new CharsRefBuilder(); - private final LinkedList outputBuffer = new LinkedList<>(); - - private int nextNodeOut; - private int lastNodeOut; - private int maxLookaheadUsed; - - // For testing: - private int captureCount; - - private boolean liveToken; - - // Start/end offset of the current match: - private int matchStartOffset; - private int matchEndOffset; - - // True once the input TokenStream is exhausted: - private boolean finished; - - private int lookaheadNextRead; - private int lookaheadNextWrite; - - private RollingBuffer lookahead = new RollingBuffer() { - @Override - protected BufferedInputToken newInstance() { - return new BufferedInputToken(); - } - }; - - static class BufferedInputToken implements RollingBuffer.Resettable { - final CharsRefBuilder term = new CharsRefBuilder(); - AttributeSource.State state; - int startOffset = -1; - int endOffset = -1; - - @Override - public void reset() { - state = null; - term.clear(); - - // Intentionally invalid to ferret out bugs: - startOffset = -1; - endOffset = -1; - } - } - - static class BufferedOutputToken { - final String term; - - // Non-null if this was an incoming token: - final State state; - - final int startNode; - final int endNode; - - public BufferedOutputToken(State state, String term, int startNode, int endNode) { - this.state = state; - this.term = term; - this.startNode = startNode; - this.endNode = endNode; - } - } - - public SynonymGraphFilter(TokenStream input, SynonymMap synonyms, boolean ignoreCase) { - super(input); - this.synonyms = synonyms; - this.fst = synonyms.fst; - if (fst == null) { - throw new IllegalArgumentException("fst must be non-null"); - } - this.fstReader = fst.getBytesReader(); - scratchArc = new FST.Arc<>(); - this.ignoreCase = ignoreCase; - } - - @Override - public boolean incrementToken() throws IOException { - //System.out.println("\nS: incrToken lastNodeOut=" + lastNodeOut + " nextNodeOut=" + nextNodeOut); - - assert lastNodeOut <= nextNodeOut; - - if (outputBuffer.isEmpty() == false) { - // We still have pending outputs from a prior synonym match: - releaseBufferedToken(); - //System.out.println(" syn: ret buffered=" + this); - assert liveToken == false; - return true; - } - - // Try to parse a new synonym match at the current token: - - if (parse()) { - // A new match was found: - releaseBufferedToken(); - //System.out.println(" syn: after parse, ret buffered=" + this); - assert liveToken == false; - return true; - } - - if (lookaheadNextRead == 
lookaheadNextWrite) { - - // Fast path: parse pulled one token, but it didn't match - // the start for any synonym, so we now return it "live" w/o having - // cloned all of its atts: - if (finished) { - //System.out.println(" syn: ret END"); - return false; - } - - assert liveToken; - liveToken = false; - - // NOTE: no need to change posInc since it's relative, i.e. whatever - // node our output is upto will just increase by the incoming posInc. - // We also don't need to change posLen, but only because we cannot - // consume a graph, so the incoming token can never span a future - // synonym match. - - } else { - // We still have buffered lookahead tokens from a previous - // parse attempt that required lookahead; just replay them now: - //System.out.println(" restore buffer"); - assert lookaheadNextRead < lookaheadNextWrite : "read=" + lookaheadNextRead + " write=" + lookaheadNextWrite; - BufferedInputToken token = lookahead.get(lookaheadNextRead); - lookaheadNextRead++; - - restoreState(token.state); - - lookahead.freeBefore(lookaheadNextRead); - - //System.out.println(" after restore offset=" + offsetAtt.startOffset() + "-" + offsetAtt.endOffset()); - assert liveToken == false; - } - - lastNodeOut += posIncrAtt.getPositionIncrement(); - nextNodeOut = lastNodeOut + posLenAtt.getPositionLength(); - - //System.out.println(" syn: ret lookahead=" + this); - - return true; - } - - private void releaseBufferedToken() throws IOException { - //System.out.println(" releaseBufferedToken"); - - BufferedOutputToken token = outputBuffer.pollFirst(); - - if (token.state != null) { - // This is an original input token (keepOrig=true case): - //System.out.println(" hasState"); - restoreState(token.state); - //System.out.println(" startOffset=" + offsetAtt.startOffset() + " endOffset=" + offsetAtt.endOffset()); - } else { - clearAttributes(); - //System.out.println(" no state"); - termAtt.append(token.term); - - // We better have a match already: - assert matchStartOffset != -1; - - offsetAtt.setOffset(matchStartOffset, matchEndOffset); - //System.out.println(" startOffset=" + matchStartOffset + " endOffset=" + matchEndOffset); - typeAtt.setType(TYPE_SYNONYM); - } - - //System.out.println(" lastNodeOut=" + lastNodeOut); - //System.out.println(" term=" + termAtt); - - posIncrAtt.setPositionIncrement(token.startNode - lastNodeOut); - lastNodeOut = token.startNode; - posLenAtt.setPositionLength(token.endNode - token.startNode); - flagsAtt.setFlags(flagsAtt.getFlags() | GRAPH_FLAG); // set the graph flag - } - - /** - * Scans the next input token(s) to see if a synonym matches. Returns true - * if a match was found. 
- */ - private boolean parse() throws IOException { - // System.out.println(Thread.currentThread().getName() + ": S: parse: " + System.identityHashCode(this)); - - // Holds the longest match we've seen so far: - BytesRef matchOutput = null; - int matchInputLength = 0; - - BytesRef pendingOutput = fst.outputs.getNoOutput(); - fst.getFirstArc(scratchArc); - - assert scratchArc.output == fst.outputs.getNoOutput(); - - // How many tokens in the current match - int matchLength = 0; - boolean doFinalCapture = false; - - int lookaheadUpto = lookaheadNextRead; - matchStartOffset = -1; - - byToken: - while (true) { - //System.out.println(" cycle lookaheadUpto=" + lookaheadUpto + " maxPos=" + lookahead.getMaxPos()); - - // Pull next token's chars: - final char[] buffer; - final int bufferLen; - final int inputEndOffset; - - if (lookaheadUpto <= lookahead.getMaxPos()) { - // Still in our lookahead buffer - BufferedInputToken token = lookahead.get(lookaheadUpto); - lookaheadUpto++; - buffer = token.term.chars(); - bufferLen = token.term.length(); - inputEndOffset = token.endOffset; - //System.out.println(" use buffer now max=" + lookahead.getMaxPos()); - if (matchStartOffset == -1) { - matchStartOffset = token.startOffset; - } - } else { - - // We used up our lookahead buffer of input tokens - // -- pull next real input token: - - assert finished || liveToken == false; - - if (finished) { - //System.out.println(" break: finished"); - break; - } else if (input.incrementToken()) { - //System.out.println(" input.incrToken"); - liveToken = true; - buffer = termAtt.buffer(); - bufferLen = termAtt.length(); - if (matchStartOffset == -1) { - matchStartOffset = offsetAtt.startOffset(); - } - inputEndOffset = offsetAtt.endOffset(); - - lookaheadUpto++; - } else { - // No more input tokens - finished = true; - //System.out.println(" break: now set finished"); - break; - } - } - - matchLength++; - //System.out.println(" cycle term=" + new String(buffer, 0, bufferLen)); - - // Run each char in this token through the FST: - int bufUpto = 0; - while (bufUpto < bufferLen) { - final int codePoint = Character.codePointAt(buffer, bufUpto, bufferLen); - if (fst.findTargetArc(ignoreCase ? Character.toLowerCase(codePoint) : codePoint, scratchArc, scratchArc, fstReader) == - null) { - break byToken; - } - - // Accum the output - pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output); - bufUpto += Character.charCount(codePoint); - } - - assert bufUpto == bufferLen; - - // OK, entire token matched; now see if this is a final - // state in the FST (a match): - if (scratchArc.isFinal()) { - matchOutput = fst.outputs.add(pendingOutput, scratchArc.nextFinalOutput); - matchInputLength = matchLength; - matchEndOffset = inputEndOffset; - //System.out.println(" ** match"); - } - - // See if the FST can continue matching (ie, needs to - // see the next input token): - if (fst.findTargetArc(SynonymMap.WORD_SEPARATOR, scratchArc, scratchArc, fstReader) == null) { - // No further rules can match here; we're done - // searching for matching rules starting at the - // current input position. 
- break; - } else { - // More matching is possible -- accum the output (if - // any) of the WORD_SEP arc: - pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output); - doFinalCapture = true; - if (liveToken) { - capture(); - } - } - } - - if (doFinalCapture && liveToken && finished == false) { - // Must capture the final token if we captured any prior tokens: - capture(); - } - - if (matchOutput != null) { - - if (liveToken) { - // Single input token synonym; we must buffer it now: - capture(); - } - - // There is a match! - bufferOutputTokens(matchOutput, matchInputLength); - lookaheadNextRead += matchInputLength; - //System.out.println(" precmatch; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos()); - lookahead.freeBefore(lookaheadNextRead); - //System.out.println(" match; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos()); - return true; - } else { - //System.out.println(" no match; lookaheadNextRead=" + lookaheadNextRead); - return false; - } - - //System.out.println(" parse done inputSkipCount=" + inputSkipCount + " nextRead=" + nextRead + " nextWrite=" + nextWrite); - } - - /** - * Expands the output graph into the necessary tokens, adding - * synonyms as side paths parallel to the input tokens, and - * buffers them in the output token buffer. - */ - private void bufferOutputTokens(BytesRef bytes, int matchInputLength) { - bytesReader.reset(bytes.bytes, bytes.offset, bytes.length); - - final int code = bytesReader.readVInt(); - final boolean keepOrig = (code & 0x1) == 0; - //System.out.println(" buffer: keepOrig=" + keepOrig + " matchInputLength=" + matchInputLength); - - // How many nodes along all paths; we need this to assign the - // node ID for the final end node where all paths merge back: - int totalPathNodes; - if (keepOrig) { - assert matchInputLength > 0; - totalPathNodes = matchInputLength - 1; - } else { - totalPathNodes = 0; - } - - // How many synonyms we will insert over this match: - final int count = code >>> 1; - - // TODO: we could encode this instead into the FST: - - // 1st pass: count how many new nodes we need - List> paths = new ArrayList<>(); - for (int outputIDX = 0; outputIDX < count; outputIDX++) { - int wordID = bytesReader.readVInt(); - synonyms.words.get(wordID, scratchBytes); - scratchChars.copyUTF8Bytes(scratchBytes); - int lastStart = 0; - - List path = new ArrayList<>(); - paths.add(path); - int chEnd = scratchChars.length(); - for (int chUpto = 0; chUpto <= chEnd; chUpto++) { - if (chUpto == chEnd || scratchChars.charAt(chUpto) == SynonymMap.WORD_SEPARATOR) { - path.add(new String(scratchChars.chars(), lastStart, chUpto - lastStart)); - lastStart = 1 + chUpto; - } - } - - assert path.size() > 0; - totalPathNodes += path.size() - 1; - } - //System.out.println(" totalPathNodes=" + totalPathNodes); - - // 2nd pass: buffer tokens for the graph fragment - - // NOTE: totalPathNodes will be 0 in the case where the matched - // input is a single token and all outputs are also a single token - - // We "spawn" a side-path for each of the outputs for this matched - // synonym, all ending back at this end node: - - int startNode = nextNodeOut; - - int endNode = startNode + totalPathNodes + 1; - //System.out.println(" " + paths.size() + " new side-paths"); - - // First, fanout all tokens departing start node for these new side paths: - int newNodeCount = 0; - for (List path : paths) { - int pathEndNode; - //System.out.println(" path size=" + path.size()); - if (path.size() == 1) { - // 
Single token output, so there are no intermediate nodes: - pathEndNode = endNode; - } else { - pathEndNode = nextNodeOut + newNodeCount + 1; - newNodeCount += path.size() - 1; - } - outputBuffer.add(new BufferedOutputToken(null, path.get(0), startNode, pathEndNode)); - } - - // We must do the original tokens last, else the offsets "go backwards": - if (keepOrig) { - BufferedInputToken token = lookahead.get(lookaheadNextRead); - int inputEndNode; - if (matchInputLength == 1) { - // Single token matched input, so there are no intermediate nodes: - inputEndNode = endNode; - } else { - inputEndNode = nextNodeOut + newNodeCount + 1; - } - - //System.out.println(" keepOrig first token: " + token.term); - - outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), startNode, inputEndNode)); - } - - nextNodeOut = endNode; - - // Do full side-path for each syn output: - for (int pathID = 0; pathID < paths.size(); pathID++) { - List path = paths.get(pathID); - if (path.size() > 1) { - int lastNode = outputBuffer.get(pathID).endNode; - for (int i = 1; i < path.size() - 1; i++) { - outputBuffer.add(new BufferedOutputToken(null, path.get(i), lastNode, lastNode + 1)); - lastNode++; - } - outputBuffer.add(new BufferedOutputToken(null, path.get(path.size() - 1), lastNode, endNode)); - } - } - - if (keepOrig && matchInputLength > 1) { - // Do full "side path" with the original tokens: - int lastNode = outputBuffer.get(paths.size()).endNode; - for (int i = 1; i < matchInputLength - 1; i++) { - BufferedInputToken token = lookahead.get(lookaheadNextRead + i); - outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, lastNode + 1)); - lastNode++; - } - BufferedInputToken token = lookahead.get(lookaheadNextRead + matchInputLength - 1); - outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, endNode)); - } - - /* - System.out.println(" after buffer: " + outputBuffer.size() + " tokens:"); - for(BufferedOutputToken token : outputBuffer) { - System.out.println(" tok: " + token.term + " startNode=" + token.startNode + " endNode=" + token.endNode); - } - */ - } - - /** - * Buffers the current input token into lookahead buffer. 
- */ - private void capture() { - assert liveToken; - liveToken = false; - BufferedInputToken token = lookahead.get(lookaheadNextWrite); - lookaheadNextWrite++; - - token.state = captureState(); - token.startOffset = offsetAtt.startOffset(); - token.endOffset = offsetAtt.endOffset(); - assert token.term.length() == 0; - token.term.append(termAtt); - - captureCount++; - maxLookaheadUsed = Math.max(maxLookaheadUsed, lookahead.getBufferSize()); - //System.out.println(" maxLookaheadUsed=" + maxLookaheadUsed); - } - - @Override - public void reset() throws IOException { - super.reset(); - lookahead.reset(); - lookaheadNextWrite = 0; - lookaheadNextRead = 0; - captureCount = 0; - lastNodeOut = -1; - nextNodeOut = 0; - matchStartOffset = -1; - matchEndOffset = -1; - finished = false; - liveToken = false; - outputBuffer.clear(); - maxLookaheadUsed = 0; - //System.out.println("S: reset"); - } - - // for testing - int getCaptureCount() { - return captureCount; - } - - // for testing - int getMaxLookaheadUsed() { - return maxLookaheadUsed; - } -} diff --git a/core/src/main/java/org/apache/lucene/search/GraphQuery.java b/core/src/main/java/org/apache/lucene/search/GraphQuery.java deleted file mode 100644 index cad316d701c..00000000000 --- a/core/src/main/java/org/apache/lucene/search/GraphQuery.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.lucene.search; - -import org.apache.lucene.index.IndexReader; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -/** - * A query that wraps multiple sub-queries generated from a graph token stream. - */ -public final class GraphQuery extends Query { - private final Query[] queries; - private final boolean hasBoolean; - - /** - * Constructor sets the queries and checks if any of them are - * a boolean query. - * - * @param queries the non-null array of queries - */ - public GraphQuery(Query... queries) { - this.queries = Objects.requireNonNull(queries).clone(); - for (Query query : queries) { - if (query instanceof BooleanQuery) { - hasBoolean = true; - return; - } - } - hasBoolean = false; - } - - /** - * Gets the queries - * - * @return unmodifiable list of Query - */ - public List getQueries() { - return Collections.unmodifiableList(Arrays.asList(queries)); - } - - /** - * If there is at least one boolean query or not. - * - * @return true if there is a boolean, false if not - */ - public boolean hasBoolean() { - return hasBoolean; - } - - /** - * Rewrites to a single query or a boolean query where each query is a SHOULD clause. 
- */ - @Override - public Query rewrite(IndexReader reader) throws IOException { - if (queries.length == 0) { - return new BooleanQuery.Builder().build(); - } - - if (queries.length == 1) { - return queries[0]; - } - - BooleanQuery.Builder q = new BooleanQuery.Builder(); - q.setDisableCoord(true); - for (Query clause : queries) { - q.add(clause, BooleanClause.Occur.SHOULD); - } - - return q.build(); - } - - @Override - public String toString(String field) { - StringBuilder builder = new StringBuilder("Graph("); - for (int i = 0; i < queries.length; i++) { - if (i != 0) { - builder.append(", "); - } - builder.append(Objects.toString(queries[i])); - } - builder.append(")"); - return builder.toString(); - } - - @Override - public boolean equals(Object other) { - return sameClassAs(other) && - Arrays.equals(queries, ((GraphQuery) other).queries); - } - - @Override - public int hashCode() { - return 31 * classHash() + Arrays.hashCode(queries); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 25ef5d1885f..f2f13479c9d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -167,7 +167,7 @@ public final class AnalysisRegistry implements Closeable { * hide internal data-structures as much as possible. */ tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings))); - tokenFilters.put("synonym_graph", requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphFilterFactory(is, env, this, name, settings))); + tokenFilters.put("synonym_graph", requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings))); return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories); } @@ -231,7 +231,7 @@ public final class AnalysisRegistry implements Closeable { if ("synonym".equals(typeName)) { return requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)); } else if ("synonym_graph".equals(typeName)) { - return requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphFilterFactory(is, env, this, name, settings)); + return requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings)); } else { return getAnalysisProvider(Component.FILTER, tokenFilters, tokenFilter, typeName); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java similarity index 88% rename from core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphFilterFactory.java rename to core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java index da9b11b9785..cfb37f0b075 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java @@ -27,8 +27,8 @@ import org.elasticsearch.index.IndexSettings; import java.io.IOException; -public class SynonymGraphFilterFactory extends SynonymTokenFilterFactory { - public SynonymGraphFilterFactory(IndexSettings 
indexSettings, Environment env, AnalysisRegistry analysisRegistry, +public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory { + public SynonymGraphTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry, String name, Settings settings) throws IOException { super(indexSettings, env, analysisRegistry, name, settings); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index ac96fe8199e..c922cd8b54c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -188,30 +188,6 @@ public class NumberFieldMapper extends FieldMapper { return HalfFloatPoint.newSetQuery(field, v); } - private float nextDown(float f) { - // HalfFloatPoint.nextDown considers that -0 is the same as +0 - // while point ranges are consistent with Float.compare, so - // they consider that -0 < +0, so we explicitly make sure - // that nextDown(+0) returns -0 - if (Float.floatToIntBits(f) == Float.floatToIntBits(0f)) { - return -0f; - } else { - return HalfFloatPoint.nextDown(f); - } - } - - private float nextUp(float f) { - // HalfFloatPoint.nextUp considers that -0 is the same as +0 - // while point ranges are consistent with Float.compare, so - // they consider that -0 < +0, so we explicitly make sure - // that nextUp(-0) returns +0 - if (Float.floatToIntBits(f) == Float.floatToIntBits(-0f)) { - return +0f; - } else { - return HalfFloatPoint.nextUp(f); - } - } - @Override Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { @@ -220,16 +196,16 @@ public class NumberFieldMapper extends FieldMapper { if (lowerTerm != null) { l = parse(lowerTerm, false); if (includeLower) { - l = nextDown(l); + l = HalfFloatPoint.nextDown(l); } l = HalfFloatPoint.nextUp(l); } if (upperTerm != null) { u = parse(upperTerm, false); if (includeUpper) { - u = nextUp(u); + u = HalfFloatPoint.nextUp(u); } - u = nextDown(u); + u = HalfFloatPoint.nextDown(u); } return HalfFloatPoint.newRangeQuery(field, l, u); } @@ -302,30 +278,6 @@ public class NumberFieldMapper extends FieldMapper { return FloatPoint.newSetQuery(field, v); } - private float nextDown(float f) { - // Math.nextDown considers that -0 is the same as +0 - // while point ranges are consistent with Float.compare, so - // they consider that -0 < +0, so we explicitly make sure - // that nextDown(+0) returns -0 - if (Float.floatToIntBits(f) == Float.floatToIntBits(0f)) { - return -0f; - } else { - return Math.nextDown(f); - } - } - - private float nextUp(float f) { - // Math.nextUp considers that -0 is the same as +0 - // while point ranges are consistent with Float.compare, so - // they consider that -0 < +0, so we explicitly make sure - // that nextUp(-0) returns +0 - if (Float.floatToIntBits(f) == Float.floatToIntBits(-0f)) { - return +0f; - } else { - return Math.nextUp(f); - } - } - @Override Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { @@ -334,13 +286,13 @@ public class NumberFieldMapper extends FieldMapper { if (lowerTerm != null) { l = parse(lowerTerm, false); if (includeLower == false) { - l = nextUp(l); + l = FloatPoint.nextUp(l); } } if (upperTerm != null) { u = parse(upperTerm, false); if (includeUpper == false) { - u = nextDown(u); + u = FloatPoint.nextDown(u); } } return 
FloatPoint.newRangeQuery(field, l, u); @@ -414,30 +366,6 @@ public class NumberFieldMapper extends FieldMapper { return DoublePoint.newSetQuery(field, v); } - private double nextDown(double d) { - // Math.nextDown considers that -0 is the same as +0 - // while point ranges are consistent with Double.compare, so - // they consider that -0 < +0, so we explicitly make sure - // that nextDown(+0) returns -0 - if (Double.doubleToLongBits(d) == Double.doubleToLongBits(0d)) { - return -0d; - } else { - return Math.nextDown(d); - } - } - - private double nextUp(double d) { - // Math.nextUp considers that -0 is the same as +0 - // while point ranges are consistent with Double.compare, so - // they consider that -0 < +0, so we explicitly make sure - // that nextUp(-0) returns +0 - if (Double.doubleToLongBits(d) == Double.doubleToLongBits(-0d)) { - return +0d; - } else { - return Math.nextUp(d); - } - } - @Override Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { @@ -446,13 +374,13 @@ public class NumberFieldMapper extends FieldMapper { if (lowerTerm != null) { l = parse(lowerTerm, false); if (includeLower == false) { - l = nextUp(l); + l = DoublePoint.nextUp(l); } } if (upperTerm != null) { u = parse(upperTerm, false); if (includeUpper == false) { - u = nextDown(u); + u = DoublePoint.nextDown(u); } } return DoublePoint.newRangeQuery(field, l, u); diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java index c9664265d3a..1fbeb81febc 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -19,16 +19,7 @@ package org.elasticsearch.index.search; -import static org.apache.lucene.analysis.synonym.SynonymGraphFilter.GRAPH_FLAG; - import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.CachingTokenFilter; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.synonym.GraphTokenStreamFiniteStrings; -import org.apache.lucene.analysis.tokenattributes.FlagsAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; @@ -36,7 +27,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; -import org.apache.lucene.search.GraphQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PhraseQuery; @@ -58,8 +48,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; public class MatchQuery { @@ -316,116 +304,6 @@ public class MatchQuery { this.mapper = mapper; } - /** - * Creates a query from the analysis chain. Overrides original so all it does is create the token stream and pass that into the - * new {@link #createFieldQuery(TokenStream, Occur, String, boolean, int)} method which has all the original query generation logic. 
- * - * @param analyzer analyzer used for this query - * @param operator default boolean operator used for this query - * @param field field to create queries against - * @param queryText text to be passed to the analysis chain - * @param quoted true if phrases should be generated when terms occur at more than one position - * @param phraseSlop slop factor for phrase/multiphrase queries - */ - @Override - protected final Query createFieldQuery(Analyzer analyzer, BooleanClause.Occur operator, String field, String queryText, - boolean quoted, int phraseSlop) { - assert operator == BooleanClause.Occur.SHOULD || operator == BooleanClause.Occur.MUST; - - // Use the analyzer to get all the tokens, and then build an appropriate - // query based on the analysis chain. - try (TokenStream source = analyzer.tokenStream(field, queryText)) { - return createFieldQuery(source, operator, field, quoted, phraseSlop); - } catch (IOException e) { - throw new RuntimeException("Error analyzing query text", e); - } - } - - /** - * Creates a query from a token stream. Same logic as {@link #createFieldQuery(Analyzer, Occur, String, String, boolean, int)} - * with additional graph token stream detection. - * - * @param source the token stream to create the query from - * @param operator default boolean operator used for this query - * @param field field to create queries against - * @param quoted true if phrases should be generated when terms occur at more than one position - * @param phraseSlop slop factor for phrase/multiphrase queries - */ - protected final Query createFieldQuery(TokenStream source, BooleanClause.Occur operator, String field, boolean quoted, - int phraseSlop) { - assert operator == BooleanClause.Occur.SHOULD || operator == BooleanClause.Occur.MUST; - - // Build an appropriate query based on the analysis chain. - try (CachingTokenFilter stream = new CachingTokenFilter(source)) { - - TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); - PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class); - PositionLengthAttribute posLenAtt = stream.addAttribute(PositionLengthAttribute.class); - FlagsAttribute flagsAtt = stream.addAttribute(FlagsAttribute.class); - - if (termAtt == null) { - return null; - } - - // phase 1: read through the stream and assess the situation: - // counting the number of tokens/positions and marking if we have any synonyms. - - int numTokens = 0; - int positionCount = 0; - boolean hasSynonyms = false; - boolean isGraph = false; - - stream.reset(); - while (stream.incrementToken()) { - numTokens++; - int positionIncrement = posIncAtt.getPositionIncrement(); - if (positionIncrement != 0) { - positionCount += positionIncrement; - } else { - hasSynonyms = true; - } - - int positionLength = posLenAtt.getPositionLength(); - if (!isGraph && positionLength > 1 && ((flagsAtt.getFlags() & GRAPH_FLAG) == GRAPH_FLAG)) { - isGraph = true; - } - } - - // phase 2: based on token count, presence of synonyms, and options - // formulate a single term, boolean, or phrase. 
- - if (numTokens == 0) { - return null; - } else if (numTokens == 1) { - // single term - return analyzeTerm(field, stream); - } else if (isGraph) { - // graph - return analyzeGraph(stream, operator, field, quoted, phraseSlop); - } else if (quoted && positionCount > 1) { - // phrase - if (hasSynonyms) { - // complex phrase with synonyms - return analyzeMultiPhrase(field, stream, phraseSlop); - } else { - // simple phrase - return analyzePhrase(field, stream, phraseSlop); - } - } else { - // boolean - if (positionCount == 1) { - // only one position, with synonyms - return analyzeBoolean(field, stream); - } else { - // complex case: multiple positions - return analyzeMultiBoolean(field, stream, operator); - } - } - } catch (IOException e) { - throw new RuntimeException("Error analyzing query text", e); - } - } - @Override protected Query newTermQuery(Term term) { return blendTermQuery(term, mapper); @@ -492,30 +370,6 @@ public class MatchQuery { return booleanQuery; } - - /** - * Creates a query from a graph token stream by extracting all the finite strings from the graph and using them to create the query. - */ - protected Query analyzeGraph(TokenStream source, BooleanClause.Occur operator, String field, boolean quoted, int phraseSlop) - throws IOException { - source.reset(); - GraphTokenStreamFiniteStrings graphTokenStreams = new GraphTokenStreamFiniteStrings(); - List tokenStreams = graphTokenStreams.getTokenStreams(source); - - if (tokenStreams.isEmpty()) { - return null; - } - - List queries = new ArrayList<>(tokenStreams.size()); - for (TokenStream ts : tokenStreams) { - Query query = createFieldQuery(ts, operator, field, quoted, phraseSlop); - if (query != null) { - queries.add(query); - } - } - - return new GraphQuery(queries.toArray(new Query[0])); - } } protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 623f883f492..07ce6663505 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
-grant codeBase "${codebase.lucene-core-6.4.0-snapshot-ec38570.jar}" { +grant codeBase "${codebase.lucene-core-6.4.0-snapshot-084f7a0.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.4.0-snapshot-ec38570.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-6.4.0-snapshot-ec38570.jar}" { +grant codeBase "${codebase.lucene-misc-6.4.0-snapshot-084f7a0.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 6f44c37d233..43fbe43f220 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.4.0-snapshot-ec38570.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.4.0-snapshot-084f7a0.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS diff --git a/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java b/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java index f6fbc3410ac..0cd185bc03a 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/index/search/MatchQueryIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.search; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -120,9 +119,9 @@ public class MatchQueryIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch(INDEX).setQuery(QueryBuilders.matchQuery("field", "say what the fudge") .operator(Operator.AND).analyzer("lower_syns")).get(); - // 0 = say, 1 = OR(wtf, what), 2 = the, 3 = fudge - // "the" and "fudge" are required here, even though they were part of the synonym which is also expanded - assertNoSearchHits(searchResponse); + // Old synonyms work fine in that case, but it is coincidental + assertHitCount(searchResponse, 1L); + assertSearchHits(searchResponse, "1"); // same query using graph should find correct result searchResponse = client().prepareSearch(INDEX).setQuery(QueryBuilders.matchQuery("field", "say what the fudge") diff --git a/modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-084f7a0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..67db46515f2 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ 
+1f0be480db66169f45a9f0982fbad9f549b88b55 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-ec38570.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index b68a4d5cbd7..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -031d34e0a604a7cbb5c8ba816d49d9f622adaa3f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-084f7a0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..28179b5a015 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +8e6047ca31d0284e1ccac1ac2d5cbf1b8e3e1b04 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-ec38570.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 7ee6c4f0787..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0850319baf063c5ee54aecabeaddb95efde8711b \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-084f7a0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..3a94f405278 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +dca59de9397d9bd33ad0714cd9896fc1bb8f13ef \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-ec38570.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index c66710ea344..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a2af1d2e80b9901b3e950f5ac1b6cd1eb408fd3 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-084f7a0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..b2495d0595b --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +0459b8e596e91ed00d5b36bc61adad53372c6491 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-ec38570.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 06702f8d87e..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e9243da1482f88a91bd5239316b571259d24341 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-084f7a0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..4860dad3ad2 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ 
+4ad39a97e64f0a477a58934318b7f129f8c33c55 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-ec38570.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 561a46f2a2c..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2ead714733bb3cc90e9792d76021497946d5af09 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-084f7a0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..805e3dddf69 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +1ae21b511636da5abd5f498d20fb087fa07fc7c2 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-ec38570.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index c3b55dc76a1..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a8f3b58e6c672276331f54b5c3be49c8014ec5c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-084f7a0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-084f7a0.jar.sha1 new file mode 100644 index 00000000000..d89554321a1 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-084f7a0.jar.sha1 @@ -0,0 +1 @@ +9be4966458f88699fb09fb0f6a3a71017e7678e7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-ec38570.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-ec38570.jar.sha1 deleted file mode 100644 index 3fbc82d91c1..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.4.0-snapshot-ec38570.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -763b3144b9bc53328e923242a3c6614903ee2d7e \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java index 6a5c764375c..1634f049392 100644 --- a/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/AnalysisFactoryTestCase.java @@ -82,6 +82,7 @@ import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StemmerOverrideTokenFilterFactory; import org.elasticsearch.index.analysis.StemmerTokenFilterFactory; import org.elasticsearch.index.analysis.StopTokenFilterFactory; +import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory; import org.elasticsearch.index.analysis.SynonymTokenFilterFactory; import org.elasticsearch.index.analysis.ThaiTokenizerFactory; import org.elasticsearch.index.analysis.TrimTokenFilterFactory; @@ -240,6 +241,7 @@ public class AnalysisFactoryTestCase extends ESTestCase { .put("stop", StopTokenFilterFactory.class) .put("swedishlightstem", StemmerTokenFilterFactory.class) .put("synonym", SynonymTokenFilterFactory.class) + .put("synonymgraph", SynonymGraphTokenFilterFactory.class) 
.put("trim", TrimTokenFilterFactory.class) .put("truncate", TruncateTokenFilterFactory.class) .put("turkishlowercase", LowerCaseTokenFilterFactory.class) @@ -275,6 +277,8 @@ public class AnalysisFactoryTestCase extends ESTestCase { .put("fingerprint", Void.class) // for tee-sinks .put("daterecognizer", Void.class) + // to flatten graphs created by the synonym graph filter + .put("flattengraph", Void.class) .immutableMap(); From 9d422c1c34e8712a595a173196ed11a8456d7fdf Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Wed, 4 Jan 2017 13:16:49 -0600 Subject: [PATCH 086/119] IndicesService handles all exceptions during index deletion (#22433) Previously, we could run into a situation where attempting to delete an index due to a cluster state update would cause an unhandled exception to bubble up to the ClusterService and cause the cluster state applier to fail. The result of this situation is that the cluster state never gets updated on the ClusterService because the exception happens before all cluster state appliers have completed and the ClusterService only updates the cluster state once all cluster state appliers have successfully completed. All other methods on IndicesService properly handle all exceptions and not just IOExceptions, but there were two instances with respect to index deletion where only IOExceptions where handled by the IndicesService. If any other exception occurred during these delete operations, the exception would be bubbled up to the ClusterService, causing the aforementioned issues. This commit ensures all methods in IndicesService properly capture all types of Exceptions, so that the ClusterService manages to update the cluster state, even in the presence of shard creation/deletion failures. Note that the lack of updating the cluster state in the presence of such exceptions can have many unintended consequences, one of them being the tripping of the assertion in IndicesClusterStateService#removeUnallocatedIndices where the assumption is that if there is an IndexService to remove with an unassigned shard, then the index must exist in the cluster state, but if the cluster state was never updated due to the aforementioned exceptions, then the cluster state will not have the index in question. 
---
 .../main/java/org/elasticsearch/indices/IndicesService.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index 8f5d01d7683..0ab2f3ca281 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -602,7 +602,7 @@ public class IndicesService extends AbstractLifecycleComponent
                 "the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
             }
             deleteIndexStore(reason, metaData, clusterState);
-        } catch (IOException e) {
+        } catch (Exception e) {
             logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e);
         }
     }
@@ -764,14 +764,14 @@ public class IndicesService extends AbstractLifecycleComponent
         final IndexMetaData metaData;
         try {
             metaData = metaStateService.loadIndexState(index);
-        } catch (IOException e) {
+        } catch (Exception e) {
             logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to load state file from a stale deleted index, folders will be left on disk", index), e);
             return null;
         }
         final IndexSettings indexSettings = buildIndexSettings(metaData);
         try {
             deleteIndexStoreIfDeletionAllowed("stale deleted index", index, indexSettings, ALWAYS_TRUE);
-        } catch (IOException e) {
+        } catch (Exception e) {
             // we just warn about the exception here because if deleteIndexStoreIfDeletionAllowed
             // throws an exception, it gets added to the list of pending deletes to be tried again
             logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete index on disk", metaData.getIndex()), e);

From 2f510b38c31a866ba6bf76eef4fae3f57b936789 Mon Sep 17 00:00:00 2001
From: Ali Beyad
Date: Wed, 4 Jan 2017 14:34:00 -0500
Subject: [PATCH 087/119] [TEST] explain API rest test may have shard allocation throttled

---
 .../test/cluster.allocation_explain/10_basic.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
index 9e1a57a4980..5ad5a4dce39 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
@@ -62,5 +62,4 @@
   - match: { shard: 0 }
   - match: { primary: false }
   - is_true: cluster_info
-  - match: { can_allocate: "no" }
-  - match: { allocate_explanation : "cannot allocate because allocation is not permitted to any of the nodes" }
+  - is_true: can_allocate

From be22a250b62b4fe33c59de665dbcf0708803ef9e Mon Sep 17 00:00:00 2001
From: Tim B
Date: Wed, 4 Jan 2017 14:38:51 -0600
Subject: [PATCH 088/119] Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions (#22287)

This integrates the mocksocket jar with elasticsearch tests. Mocksocket
wraps actions requiring SocketPermissions in doPrivileged blocks. This
will eventually allow SocketPermissions to be assigned to the
mocksocket jar, as opposed to the entire elasticsearch codebase.
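The diff below swaps direct HttpServer.create calls for
MockHttpServer.createHttp. As a rough sketch of what such a wrapper
plausibly does internally (the factory class below is hypothetical, not
mocksocket's actual source), the permission-sensitive call runs inside
AccessController.doPrivileged, so a security policy can grant
java.net.SocketPermission to the wrapper's jar alone:

    import com.sun.net.httpserver.HttpServer;
    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.security.AccessController;
    import java.security.PrivilegedActionException;
    import java.security.PrivilegedExceptionAction;

    // Hypothetical wrapper in the spirit of MockHttpServer.createHttp:
    // only the code in this doPrivileged block needs the SocketPermission
    // grant, not every caller up the stack.
    public final class PrivilegedHttpServerFactory {

        private PrivilegedHttpServerFactory() {}

        public static HttpServer createHttp(InetSocketAddress address, int backlog) throws IOException {
            try {
                return AccessController.doPrivileged(
                    (PrivilegedExceptionAction<HttpServer>) () -> HttpServer.create(address, backlog));
            } catch (PrivilegedActionException e) {
                // HttpServer.create only declares IOException, so the
                // wrapped cause is safe to rethrow as such.
                throw (IOException) e.getCause();
            }
        }
    }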
--- buildSrc/version.properties | 1 + client/rest/build.gradle | 1 + .../RestClientMultipleHostsIntegTests.java | 3 ++- .../client/RestClientSingleHostIntegTests.java | 3 ++- client/sniffer/build.gradle | 1 + .../sniff/ElasticsearchHostsSnifferTests.java | 3 ++- .../Netty4SizeHeaderFrameDecoderTests.java | 5 +++-- .../AzureDiscoveryClusterFormationTests.java | 3 ++- .../ec2/Ec2DiscoveryClusterFormationTests.java | 3 ++- .../discovery/gce/GceDiscoverTests.java | 5 +++-- .../plugin/example/ExampleExternalIT.java | 3 ++- test/framework/build.gradle | 1 + .../ClusterDiscoveryConfiguration.java | 3 ++- .../AbstractSimpleTransportTestCase.java | 17 +++++++++-------- .../transport/MockTcpTransport.java | 6 ++++-- 15 files changed, 37 insertions(+), 21 deletions(-) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4fd4f26bd71..15d2f320962 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -20,5 +20,6 @@ commonslogging = 1.1.3 commonscodec = 1.10 hamcrest = 1.3 securemock = 1.2 +mocksocket = 1.1 # benchmark dependencies jmh = 1.17.3 diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 1c92013da97..67f8426fb5f 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -43,6 +43,7 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index f997f798712..da5a960c2e8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -24,6 +24,7 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import org.apache.http.HttpHost; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; +import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -80,7 +81,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { } private static HttpServer createHttpServer() throws Exception { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); //returns a different status code depending on the path for (int statusCode : getAllStatusCodes()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 4440c1e8f97..2f94de7154c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -29,6 +29,7 @@ import org.apache.http.HttpHost; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; +import 
org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -87,7 +88,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } private static HttpServer createHttpServer() throws Exception { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); //returns a different status code depending on the path for (int statusCode : getAllStatusCodes()) { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index f35110e4f9e..5542792835b 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -43,6 +43,7 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java index aeb0620134b..5221b205dd4 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.After; import org.junit.Before; @@ -141,7 +142,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse)); return httpServer; } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 0e9ebe5f7f4..15dea8fe871 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportSettings; @@ -84,7 +85,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase { 
String randomMethod = randomFrom("GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH"); String data = randomMethod + " / HTTP/1.1"; - try (Socket socket = new Socket(host, port)) { + try (Socket socket = new MockSocket(host, port)) { socket.getOutputStream().write(data.getBytes(StandardCharsets.UTF_8)); socket.getOutputStream().flush(); @@ -95,7 +96,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase { } public void testThatNothingIsReturnedForOtherInvalidPackets() throws Exception { - try (Socket socket = new Socket(host, port)) { + try (Socket socket = new MockSocket(host, port)) { socket.getOutputStream().write("FOOBAR".getBytes(StandardCharsets.UTF_8)); socket.getOutputStream().flush(); diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index 7f977592e8d..9787dc136e3 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; +import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.node.Node; import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; @@ -131,7 +132,7 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase { public static void startHttpd() throws Exception { logDir = createTempDir(); SSLContext sslContext = getSSLContext(); - httpsServer = HttpsServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); + httpsServer = MockHttpServer.createHttps(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); httpsServer.setHttpsConfigurator(new HttpsConfigurator(sslContext)); httpsServer.createContext("/subscription/services/hostedservices/myservice", (s) -> { Headers headers = s.getResponseHeaders(); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java index b4a1f55a3c6..4def3737563 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -96,7 +97,7 @@ public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase { @BeforeClass public static void startHttpd() throws Exception { logDir = createTempDir(); - httpServer = HttpServer.create(new 
InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); httpServer.createContext("/", (s) -> { Headers headers = s.getResponseHeaders(); diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index 76d7c6408d5..fc0234671f0 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -108,8 +109,8 @@ public class GceDiscoverTests extends ESIntegTestCase { public static void startHttpd() throws Exception { logDir = createTempDir(); SSLContext sslContext = getSSLContext(); - httpsServer = HttpsServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); + httpsServer = MockHttpServer.createHttps(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); httpsServer.setHttpsConfigurator(new HttpsConfigurator(sslContext)); httpServer.createContext("/computeMetadata/v1/instance/service-accounts/default/token", (s) -> { String response = GceMockUtils.readGoogleInternalJsonResponse( diff --git a/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/ExampleExternalIT.java b/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/ExampleExternalIT.java index 1f48549aad4..3ed616cb3af 100644 --- a/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/ExampleExternalIT.java +++ b/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/ExampleExternalIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.plugin.example; +import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.test.ESTestCase; import java.io.BufferedReader; @@ -34,7 +35,7 @@ public class ExampleExternalIT extends ESTestCase { String stringAddress = Objects.requireNonNull(System.getProperty("external.address")); URL url = new URL("http://" + stringAddress); InetAddress address = InetAddress.getByName(url.getHost()); - try (Socket socket = new Socket(address, url.getPort()); + try (Socket socket = new MockSocket(address, url.getPort()); BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) { assertEquals("TEST", reader.readLine()); } diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 8be06574df8..6756495e0a1 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -32,6 +32,7 @@ dependencies { compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile 
"org.elasticsearch:securemock:${versions.securemock}" + compile "org.elasticsearch:mocksocket:${versions.mocksocket}" } compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index 3fd2b024a1d..f4be0d0d529 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.transport.TransportSettings; @@ -136,7 +137,7 @@ public class ClusterDiscoveryConfiguration extends NodeConfigurationSource { for (int i = 0; i < unicastHostPorts.length; i++) { boolean foundPortInRange = false; while (tries < InternalTestCluster.PORTS_PER_JVM && !foundPortInRange) { - try (ServerSocket serverSocket = new ServerSocket()) { + try (ServerSocket serverSocket = new MockServerSocket()) { // Set SO_REUSEADDR as we may bind here and not be able to reuse the address immediately without it. serverSocket.setReuseAddress(NetworkUtils.defaultReuseAddress()); serverSocket.bind(new InetSocketAddress(IP_ADDR, nextPort)); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 6c407c88874..90ed43bc1fb 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -1366,7 +1367,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // all is well } - try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)){ + try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)) { serviceB.handshake(connection, 100); fail("exception should be thrown"); } catch (IllegalStateException e) { @@ -1424,7 +1425,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // all is well } - try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)){ + try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)) { serviceB.handshake(connection, 100); fail("exception should be thrown"); } catch (IllegalStateException e) { @@ -1778,7 +1779,7 @@ public abstract class AbstractSimpleTransportTestCase extends 
ESTestCase { public void testTimeoutPerConnection() throws IOException { assumeTrue("Works only on BSD network stacks and apparently windows", Constants.MAC_OS_X || Constants.FREE_BSD || Constants.WINDOWS); - try (ServerSocket socket = new ServerSocket()) { + try (ServerSocket socket = new MockServerSocket()) { // note - this test uses backlog=1 which is implementation specific ie. it might not work on some TCP/IP stacks // on linux (at least newer ones) the listen(addr, backlog=1) should just ignore new connections if the queue is full which // means that once we received an ACK from the client we just drop the packet on the floor (which is what we want) and we run @@ -1823,7 +1824,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Settings.EMPTY, Collections.emptyList())){ + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Settings.EMPTY, Collections.emptyList())) { @Override protected String handleRequest(MockChannel mockChannel, String profileName, StreamInput stream, long requestId, int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) @@ -1854,7 +1855,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } public void testTcpHandshakeTimeout() throws IOException { - try (ServerSocket socket = new ServerSocket()) { + try (ServerSocket socket = new MockServerSocket()) { socket.bind(new InetSocketAddress(InetAddress.getLocalHost(), 0), 1); socket.setReuseAddress(true); DiscoveryNode dummy = new DiscoveryNode("TEST", new TransportAddress(socket.getInetAddress(), @@ -1870,12 +1871,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { builder.setHandshakeTimeout(TimeValue.timeValueMillis(1)); ConnectTransportException ex = expectThrows(ConnectTransportException.class, () -> serviceA.connectToNode(dummy, builder.build())); - assertEquals("[][" + dummy.getAddress() +"] handshake_timeout[1ms]", ex.getMessage()); + assertEquals("[][" + dummy.getAddress() + "] handshake_timeout[1ms]", ex.getMessage()); } } public void testTcpHandshakeConnectionReset() throws IOException, InterruptedException { - try (ServerSocket socket = new ServerSocket()) { + try (ServerSocket socket = new MockServerSocket()) { socket.bind(new InetSocketAddress(InetAddress.getLocalHost(), 0), 1); socket.setReuseAddress(true); DiscoveryNode dummy = new DiscoveryNode("TEST", new TransportAddress(socket.getInetAddress(), @@ -1904,7 +1905,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { builder.setHandshakeTimeout(TimeValue.timeValueHours(1)); ConnectTransportException ex = expectThrows(ConnectTransportException.class, () -> serviceA.connectToNode(dummy, builder.build())); - assertEquals(ex.getMessage(), "[][" + dummy.getAddress() +"] general node connection failure"); + assertEquals(ex.getMessage(), "[][" + dummy.getAddress() + "] general node connection failure"); assertThat(ex.getCause().getMessage(), startsWith("handshake failed")); t.join(); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index a2d5f10483f..3b5b430f606 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java
@@ -35,6 +35,8 @@ import org.elasticsearch.common.util.CancellableThreads;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.mocksocket.MockServerSocket;
+import org.elasticsearch.mocksocket.MockSocket;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.io.BufferedInputStream;
@@ -110,7 +112,7 @@ public class MockTcpTransport extends TcpTransport

     @Override
     protected MockChannel bind(final String name, InetSocketAddress address) throws IOException {
-        ServerSocket socket = new ServerSocket();
+        MockServerSocket socket = new MockServerSocket();
         socket.bind(address);
         socket.setReuseAddress(TCP_REUSE_ADDRESS.get(settings));
         ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
@@ -178,7 +180,7 @@ public class MockTcpTransport extends TcpTransport
         final MockChannel[] mockChannels = new MockChannel[1];
         final NodeChannels nodeChannels = new NodeChannels(node, mockChannels, LIGHT_PROFILE); // we always use light here
         boolean success = false;
-        final Socket socket = new Socket();
+        final MockSocket socket = new MockSocket();
         try {
             Consumer<MockChannel> onClose = (channel) -> {
                 final NodeChannels connected = connectedNodes.get(node);

From bf5152278882b74b1a156e90d87adb954eead4e6 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Wed, 4 Jan 2017 14:00:54 -0800
Subject: [PATCH 089/119] Add 5.3 version

---
 core/src/main/java/org/elasticsearch/Version.java | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index bac6ccbf3fd..1ee42adaee3 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -103,6 +103,8 @@ public class Version implements Comparable<Version> {
     public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
     public static final int V_5_2_0_ID_UNRELEASED = 5020099;
     public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
+    public static final int V_5_3_0_ID_UNRELEASED = 5030099;
+    public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
     public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
     public static final Version V_6_0_0_alpha1_UNRELEASED = new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
@@ -123,6 +125,8 @@ public class Version implements Comparable<Version> {
         switch (id) {
             case V_6_0_0_alpha1_ID_UNRELEASED:
                 return V_6_0_0_alpha1_UNRELEASED;
+            case V_5_3_0_ID_UNRELEASED:
+                return V_5_3_0_UNRELEASED;
             case V_5_2_0_ID_UNRELEASED:
                 return V_5_2_0_UNRELEASED;
             case V_5_1_2_ID_UNRELEASED:

From a5daa5d3a2f58dda0ab6133ccc5ead4d4a701c3a Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Thu, 5 Jan 2017 07:32:53 +0100
Subject: [PATCH 090/119] Execute low level handshake in #openConnection
 (#22440)

Today we execute the low level handshake on the TCP layer in
#connectToNode. If #openConnection is used directly, which is truly an
expert-level API, no handshake is executed, which allows connecting to
nodes that are not necessarily compatible.
This change moves the handshake to #openConnection to prevent bypassing this logic. --- .../elasticsearch/transport/TcpTransport.java | 20 ++++---- .../test/transport/MockTransportService.java | 5 ++ .../AbstractSimpleTransportTestCase.java | 49 ++++++++++++++++++- 3 files changed, 63 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 2e8cb4f65ce..c2f0832b75e 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -458,13 +458,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i "failed to connect to [{}], cleaning dangling connections", node), e); throw e; } - Channel channel = nodeChannels.channel(TransportRequestOptions.Type.PING); - final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ? - defaultConnectionProfile.getConnectTimeout() : - connectionProfile.getConnectTimeout(); - final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ? - connectTimeout : connectionProfile.getHandshakeTimeout(); - Version version = executeHandshake(node, channel, handshakeTimeout); // we acquire a connection lock, so no way there is an existing connection connectedNodes.put(node, nodeChannels); if (logger.isDebugEnabled()) { @@ -483,11 +476,18 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } @Override - public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { + public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException { try { - NodeChannels nodeChannels = connectToChannels(node, profile); + NodeChannels nodeChannels = connectToChannels(node, connectionProfile); + final Channel channel = nodeChannels.getChannels().get(0); // one channel is guaranteed by the connection profile + final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ? + defaultConnectionProfile.getConnectTimeout() : + connectionProfile.getConnectTimeout(); + final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ? 
+ connectTimeout : connectionProfile.getHandshakeTimeout(); + final Version version = executeHandshake(node, channel, handshakeTimeout); transportServiceAdapter.onConnectionOpened(node); - return nodeChannels; + return new NodeChannels(nodeChannels, version); // clone the channels - we now have the correct version } catch (ConnectTransportException e) { throw e; } catch (Exception e) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index a0344a6f86d..b0cc848da61 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -684,6 +684,11 @@ public final class MockTransportService extends TransportService { return connection.getNode(); } + @Override + public Version getVersion() { + return connection.getVersion(); + } + @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 90ed43bc1fb..1dbcd23687a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -43,6 +43,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -1818,6 +1819,52 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } } + public void testHandshakeWithIncompatVersion() { + assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Settings.EMPTY, Collections.emptyList()), + Version.fromString("2.0.0"))) { + transport.transportServiceAdapter(serviceA.new Adapter()); + transport.start(); + DiscoveryNode node = + new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections(1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE); + expectThrows(ConnectTransportException.class, () -> serviceA.openConnection(node, builder.build())); + } + } + + public void testHandshakeUpdatesVersion() throws IOException { + assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); + NamedWriteableRegistry namedWriteableRegistry = new 
NamedWriteableRegistry(Collections.emptyList()); + Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); + try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Settings.EMPTY, Collections.emptyList()),version)) { + transport.transportServiceAdapter(serviceA.new Adapter()); + transport.start(); + DiscoveryNode node = + new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), + Version.fromString("2.0.0")); + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections(1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE); + try (Transport.Connection connection = serviceA.openConnection(node, builder.build())) { + assertEquals(connection.getVersion(), version); + } + } + } + + public void testTcpHandshake() throws IOException, InterruptedException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport(); @@ -1830,7 +1877,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) throws IOException { return super.handleRequest(mockChannel, profileName, stream, requestId, messageLengthBytes, version, remoteAddress, - (byte)(status & ~(1<<3))); // we flip the isHanshake bit back and ackt like the handler is not found + (byte)(status & ~(1<<3))); // we flip the isHandshake bit back and act like the handler is not found } }) { transport.transportServiceAdapter(serviceA.new Adapter()); From 1899aea9cae17f928e86650cac40664f7fda86c4 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 29 Dec 2016 15:30:37 +0100 Subject: [PATCH 091/119] [TEST] move randomHeaders method from RestClientTestCase to RestClientTestUtil and simplify headers assertions --- .../RestClientSingleHostIntegTests.java | 32 ++---- .../client/RestClientSingleHostTests.java | 34 ++---- .../client/RestClientTestCase.java | 101 ++++++++---------- .../client/RestClientTestUtil.java | 23 ++++ 4 files changed, 80 insertions(+), 110 deletions(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 2f94de7154c..8f4170add3d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -40,7 +40,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -50,7 +49,6 @@ import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; /** @@ -77,8 +75,7 
@@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } httpServer = createHttpServer(); - int numHeaders = randomIntBetween(0, 5); - defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders); + defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); RestClientBuilder restClientBuilder = RestClient.builder( new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())).setDefaultHeaders(defaultHeaders); if (pathPrefix.length() > 0) { @@ -151,17 +148,11 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { if (method.equals("HEAD") == false) { standardHeaders.add("Content-length"); } - - final int numHeaders = randomIntBetween(1, 5); - final Header[] headers = generateHeaders("Header", "Header-array", numHeaders); - final Map> expectedHeaders = new HashMap<>(); - - addHeaders(expectedHeaders, defaultHeaders, headers); - + final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), headers); + esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); } catch(ResponseException e) { esResponse = e.getResponse(); } @@ -169,24 +160,13 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { assertEquals(method, esResponse.getRequestLine().getMethod()); assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, esResponse.getRequestLine().getUri()); - + assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), standardHeaders); for (final Header responseHeader : esResponse.getHeaders()) { - final String name = responseHeader.getName(); - final String value = responseHeader.getValue(); - if (name.startsWith("Header")) { - final List values = expectedHeaders.get(name); - assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values); - assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value)); - - // we've collected them all - if (values.isEmpty()) { - expectedHeaders.remove(name); - } - } else { + String name = responseHeader.getName(); + if (name.startsWith("Header") == false) { assertTrue("unknown header was returned " + name, standardHeaders.remove(name)); } } - assertTrue("some headers that were sent weren't returned: " + expectedHeaders, expectedHeaders.isEmpty()); assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index ce0d6d0936e..865f9b1817a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -56,7 +56,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Future; @@ -70,7 +69,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static 
org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -131,9 +129,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { } }); - - int numHeaders = randomIntBetween(0, 3); - defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders); + defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); httpHost = new HttpHost("localhost", 9200); failureListener = new HostsTrackingFailureListener(); restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); @@ -339,33 +335,16 @@ public class RestClientSingleHostTests extends RestClientTestCase { */ public void testHeaders() throws IOException { for (String method : getHttpMethods()) { - final int numHeaders = randomIntBetween(1, 5); - final Header[] headers = generateHeaders("Header", null, numHeaders); - final Map> expectedHeaders = new HashMap<>(); - - addHeaders(expectedHeaders, defaultHeaders, headers); - + final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, headers); + esResponse = restClient.performRequest(method, "/" + statusCode, requestHeaders); } catch(ResponseException e) { esResponse = e.getResponse(); } assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); - for (Header responseHeader : esResponse.getHeaders()) { - final String name = responseHeader.getName(); - final String value = responseHeader.getValue(); - final List values = expectedHeaders.get(name); - assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values); - assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value)); - - // we've collected them all - if (values.isEmpty()) { - expectedHeaders.remove(name); - } - } - assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty()); + assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), Collections.emptySet()); } } @@ -424,10 +403,9 @@ public class RestClientSingleHostTests extends RestClientTestCase { } Header[] headers = new Header[0]; - final int numHeaders = randomIntBetween(1, 5); - final Set uniqueNames = new HashSet<>(numHeaders); + final Set uniqueNames = new HashSet<>(); if (randomBoolean()) { - headers = generateHeaders("Header", "Header-array", numHeaders); + headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); for (Header header : headers) { request.addHeader(header); uniqueNames.add(header.getName()); diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java index 4296932a002..6a2a45ef281 100644 --- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java @@ -30,16 +30,19 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; import 
com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; - import org.apache.http.Header; -import org.apache.http.message.BasicHeader; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + @TestMethodProviders({ JUnit3MethodProvider.class }) @@ -53,70 +56,56 @@ import java.util.Set; public abstract class RestClientTestCase extends RandomizedTest { /** - * Create the specified number of {@link Header}s. - *

- * Generated header names will be the {@code baseName} plus its index or, rarely, the {@code arrayName} if it's supplied. + * Assert that the actual headers are the expected ones given the original default and request headers. Some headers can be ignored, + * for instance in case the http client is adding its own automatically. * - * @param baseName The base name to use for all headers. - * @param arrayName The optional ({@code null}able) array name to use randomly. - * @param headers The number of headers to create. - * @return Never {@code null}. + * @param defaultHeaders the default headers set to the REST client instance + * @param requestHeaders the request headers sent with a particular request + * @param actualHeaders the actual headers as a result of the provided default and request headers + * @param ignoreHeaders header keys to be ignored as they are not part of default nor request headers, yet they + * will be part of the actual ones */ - protected static Header[] generateHeaders(final String baseName, final String arrayName, final int headers) { - final Header[] generated = new Header[headers]; - for (int i = 0; i < headers; i++) { - String headerName = baseName + i; - if (arrayName != null && rarely()) { - headerName = arrayName; - } - - generated[i] = new BasicHeader(headerName, randomAsciiOfLengthBetween(3, 10)); + protected static void assertHeaders(final Header[] defaultHeaders, final Header[] requestHeaders, + final Header[] actualHeaders, final Set ignoreHeaders) { + final Map> expectedHeaders = new HashMap<>(); + final Set requestHeaderKeys = new HashSet<>(); + for (final Header header : requestHeaders) { + final String name = header.getName(); + addValueToListEntry(expectedHeaders, name, header.getValue()); + requestHeaderKeys.add(name); } - return generated; + for (final Header defaultHeader : defaultHeaders) { + final String name = defaultHeader.getName(); + if (requestHeaderKeys.contains(name) == false) { + addValueToListEntry(expectedHeaders, name, defaultHeader.getValue()); + } + } + Set actualIgnoredHeaders = new HashSet<>(); + for (Header responseHeader : actualHeaders) { + final String name = responseHeader.getName(); + if (ignoreHeaders.contains(name)) { + expectedHeaders.remove(name); + actualIgnoredHeaders.add(name); + continue; + } + final String value = responseHeader.getValue(); + final List values = expectedHeaders.get(name); + assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values); + assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value)); + if (values.isEmpty()) { + expectedHeaders.remove(name); + } + } + assertEquals("some headers meant to be ignored were not part of the actual headers", ignoreHeaders, actualIgnoredHeaders); + assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty()); } - /** - * Create a new {@link List} within the {@code map} if none exists for {@code name} or append to the existing list. - * - * @param map The map to manipulate. - * @param name The name to create/append the list for. - * @param value The value to add. 
- */ - private static void createOrAppendList(final Map> map, final String name, final String value) { + private static void addValueToListEntry(final Map> map, final String name, final String value) { List values = map.get(name); - if (values == null) { values = new ArrayList<>(); map.put(name, values); } - values.add(value); } - - /** - * Add the {@code headers} to the {@code map} so that related tests can more easily assert that they exist. - *

- * If both the {@code defaultHeaders} and {@code headers} contain the same {@link Header}, based on its
- * {@linkplain Header#getName() name}, then this will only use the {@code Header}(s) from {@code headers}.
- *
- * @param map The map to build with name/value(s) pairs.
- * @param defaultHeaders The headers to add to the map representing default headers.
- * @param headers The headers to add to the map representing request-level headers.
- * @see #createOrAppendList(Map, String, String)
- */
-    protected static void addHeaders(final Map<String, List<String>> map, final Header[] defaultHeaders, final Header[] headers) {
-        final Set<String> uniqueHeaders = new HashSet<>();
-        for (final Header header : headers) {
-            final String name = header.getName();
-            createOrAppendList(map, name, header.getValue());
-            uniqueHeaders.add(name);
-        }
-        for (final Header defaultHeader : defaultHeaders) {
-            final String name = defaultHeader.getName();
-            if (uniqueHeaders.contains(name) == false) {
-                createOrAppendList(map, name, defaultHeader.getValue());
-            }
-        }
-    }
-
 }
diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java
index 4d4aa00f492..dbf85578b19 100644
--- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java
+++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java
@@ -19,7 +19,11 @@

 package org.elasticsearch.client;

+import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.http.Header;
+import org.apache.http.message.BasicHeader;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -81,4 +85,23 @@ final class RestClientTestUtil {
     static List<Integer> getAllStatusCodes() {
         return ALL_STATUS_CODES;
     }
+
+    /**
+     * Create a random number of {@link Header}s.
+     * Generated header names will either be the {@code baseName} plus its index, or exactly the provided {@code baseName}, so that
+     * we also test support for multiple headers with the same key and different values.
+     */
+    static Header[] randomHeaders(Random random, final String baseName) {
+        int numHeaders = RandomNumbers.randomIntBetween(random, 0, 5);
+        final Header[] headers = new Header[numHeaders];
+        for (int i = 0; i < numHeaders; i++) {
+            String headerName = baseName;
+            //randomly exercise the code path that supports multiple headers with same key
+            if (random.nextBoolean()) {
+                headerName = headerName + i;
+            }
+            headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10));
+        }
+        return headers;
+    }
 }

From f0181b19f565ad255a7705190077897ec173ac0f Mon Sep 17 00:00:00 2001
From: javanna
Date: Thu, 29 Dec 2016 18:16:04 +0100
Subject: [PATCH 092/119] add REST high level client gradle submodule and
 first simple method

The RestHighLevelClient class takes as an argument a low level client
instance, RestClient. The first method added is ping, which returns true
if the call to HEAD / went ok and false if an IOException was thrown.
Any other exception gets bubbled up.

There are two kinds of tests: a unit test (RestHighLevelClientTests)
that verifies the interaction between the high level and low level
client, and an integration test (MainActionIT) which relies on an
externally started es cluster to send requests to.
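As an illustration (hypothetical caller code; host and port are placeholders), the intended usage of the new method, showing the ownership rule stated above: the low level client is built and closed by the caller, and the high level client only wraps it.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class PingExample {
    public static void main(String[] args) throws Exception {
        // the caller owns the low level client and is responsible for closing it
        try (RestClient lowLevelClient = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            RestHighLevelClient client = new RestHighLevelClient(lowLevelClient);
            // true if HEAD / succeeds, false if an IOException was thrown
            System.out.println("cluster reachable: " + client.ping());
        }
    }
}
--------------------------------------------------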
--- .../test/StandaloneTestBasePlugin.groovy | 10 ++- client/rest-high-level/build.gradle | 41 +++++++++ .../client/RestHighLevelClient.java | 51 +++++++++++ .../client/ESRestHighLevelClientTestCase.java | 48 ++++++++++ .../elasticsearch/client/MainActionIT.java | 27 ++++++ .../client/RestHighLevelClientTests.java | 87 +++++++++++++++++++ settings.gradle | 1 + 7 files changed, 262 insertions(+), 3 deletions(-) create mode 100644 client/rest-high-level/build.gradle create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy index af2b20e4abf..db68035e3eb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy @@ -26,6 +26,7 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.Plugin import org.gradle.api.Project +import org.gradle.api.Task import org.gradle.api.plugins.JavaBasePlugin /** Configures the build to have a rest integration test. */ @@ -40,7 +41,7 @@ public class StandaloneTestBasePlugin implements Plugin { BuildPlugin.configureRepositories(project) // only setup tests to build - project.sourceSets.create('test') + project.sourceSets.maybeCreate('test') project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") project.eclipse.classpath.sourceSets = [project.sourceSets.test] @@ -48,7 +49,10 @@ public class StandaloneTestBasePlugin implements Plugin { project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]] - PrecommitTasks.create(project, false) - project.check.dependsOn(project.precommit) + Task precommitTask = project.tasks.findByName('precommit') + if (precommitTask == null) { + PrecommitTasks.create(project, false) + project.check.dependsOn(project.precommit) + } } } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle new file mode 100644 index 00000000000..162e8608d44 --- /dev/null +++ b/client/rest-high-level/build.gradle @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.rest-test' + +group = 'org.elasticsearch.client' + +dependencies { + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.client:rest:${version}" + + testCompile "org.elasticsearch.client:test:${version}" + testCompile "org.elasticsearch.test:framework:${version}" + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" +} + +dependencyLicenses { + // Don't check licenses for dependency that are part of the elasticsearch project + // But any other dependency should have its license/notice/sha1 + dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java new file mode 100644 index 00000000000..4bde0d37e61 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.Header; + +import java.io.IOException; +import java.util.Objects; + +/** + * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. + * The provided {@link RestClient} is externally built and closed. + */ +public final class RestHighLevelClient { + + private static final Log logger = LogFactory.getLog(RestHighLevelClient.class); + + private final RestClient client; + + public RestHighLevelClient(RestClient client) { + this.client = Objects.requireNonNull(client); + } + + public boolean ping(Header... headers) { + try { + client.performRequest("HEAD", "/", headers); + return true; + } catch(IOException exception) { + return false; + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java new file mode 100644 index 00000000000..bc12b1433d7 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.IOException; + +public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { + + private static RestHighLevelClient restHighLevelClient; + + @Before + public void initHighLevelClient() throws IOException { + super.initClient(); + if (restHighLevelClient == null) { + restHighLevelClient = new RestHighLevelClient(client()); + } + } + + @AfterClass + public static void cleanupClient() throws IOException { + restHighLevelClient = null; + } + + protected static RestHighLevelClient highLevelClient() { + return restHighLevelClient; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java new file mode 100644 index 00000000000..717ab7a44f3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MainActionIT.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +public class MainActionIT extends ESRestHighLevelClientTestCase { + + public void testPing() { + assertTrue(highLevelClient().ping()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java new file mode 100644 index 00000000000..7d513e48998 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentMatcher; +import org.mockito.internal.matchers.ArrayEquals; +import org.mockito.internal.matchers.VarargMatcher; + +import java.io.IOException; +import java.net.SocketTimeoutException; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RestHighLevelClientTests extends ESTestCase { + + private RestClient restClient; + private RestHighLevelClient restHighLevelClient; + + @Before + public void initClient() throws IOException { + restClient = mock(RestClient.class); + restHighLevelClient = new RestHighLevelClient(restClient); + } + + public void testPing() throws IOException { + assertTrue(restHighLevelClient.ping()); + verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher())); + } + + public void testPingFailure() throws IOException { + when(restClient.performRequest(any(), any())).thenThrow(new IllegalStateException()); + expectThrows(IllegalStateException.class, () -> restHighLevelClient.ping()); + } + + public void testPingFailed() throws IOException { + when(restClient.performRequest(any(), any())).thenThrow(new SocketTimeoutException()); + assertFalse(restHighLevelClient.ping()); + } + + public void testPingWithHeaders() throws IOException { + Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header"); + assertTrue(restHighLevelClient.ping(headers)); + verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher(headers))); + } + + private class HeadersVarargMatcher extends ArgumentMatcher implements VarargMatcher { + private Header[] expectedHeaders; + + HeadersVarargMatcher(Header... expectedHeaders) { + this.expectedHeaders = expectedHeaders; + } + + @Override + public boolean matches(Object varargArgument) { + if (varargArgument instanceof Header[]) { + Header[] actualHeaders = (Header[]) varargArgument; + return new ArrayEquals(expectedHeaders).matches(actualHeaders); + } + return false; + } + } +} diff --git a/settings.gradle b/settings.gradle index 609f1b0f8be..1125e84325c 100644 --- a/settings.gradle +++ b/settings.gradle @@ -7,6 +7,7 @@ List projects = [ 'core', 'docs', 'client:rest', + 'client:rest-high-level', 'client:sniffer', 'client:transport', 'client:test', From 812f63e5ef11bcba0fa7350327a1e81a5cbd4016 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 3 Jan 2017 15:39:36 -0500 Subject: [PATCH 093/119] Require either BuildPlugin or StandaloneTestBasePlugin to use RestTestPlugin It used to be that RestTestPlugin "came with" StandaloneTestBasePlugin but we'd like to use it with BuildPlugin for the high level rest client. 
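The client introduced in the previous patch is already usable end to end, even though it only exposes `ping()` so far. A minimal sketch, assuming the low-level client is created through the usual `RestClient.builder` entry point of the `:client:rest` module; host and port here are placeholders:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class PingExample {
    public static void main(String[] args) throws Exception {
        // the low level client is built (and later closed) by the caller
        RestClient lowLevelClient = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build();
        RestHighLevelClient client = new RestHighLevelClient(lowLevelClient);

        // issues HEAD / against the cluster; any IOException is reported as false
        boolean up = client.ping();
        System.out.println("cluster reachable: " + up);

        // the high level client does not own the wrapped client, so close it here
        lowLevelClient.close();
    }
}
--------------------------------------------------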
--- .../elasticsearch/gradle/BuildPlugin.groovy | 4 ++++ .../gradle/doc/DocsTestPlugin.groovy | 1 + .../gradle/test/RestTestPlugin.groovy | 18 +++++++++++++++-- .../test/StandaloneTestBasePlugin.groovy | 13 ++++++------ distribution/build.gradle | 5 +++-- qa/backwards-5.0/build.gradle | 20 +++++++++++++++++++ qa/smoke-test-client/build.gradle | 3 ++- qa/smoke-test-http/build.gradle | 3 ++- .../http/ContextAndHeaderTransportIT.java | 3 +-- qa/smoke-test-ingest-disabled/build.gradle | 1 + .../build.gradle | 1 + qa/smoke-test-multinode/build.gradle | 19 ++++++++++++++++++ qa/smoke-test-plugins/build.gradle | 2 +- .../build.gradle | 1 + qa/smoke-test-tribe-node/build.gradle | 1 + 15 files changed, 80 insertions(+), 15 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 628e59de1a6..1cbbe0ac26d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -21,6 +21,7 @@ package org.elasticsearch.gradle import nebula.plugin.extraconfigurations.ProvidedBasePlugin import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException import org.gradle.api.JavaVersion import org.gradle.api.Plugin import org.gradle.api.Project @@ -54,6 +55,9 @@ class BuildPlugin implements Plugin { @Override void apply(Project project) { + if (project.pluginManager.hasPlugin('elasticsearch.standalone-test')) { + throw new InvalidUserDataException('elasticsearch.standalone-test and elasticsearch.build are mutually exclusive') + } project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') // these plugins add lots of info to our jars diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index a46a7bda374..bb56360645f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -30,6 +30,7 @@ public class DocsTestPlugin extends RestTestPlugin { @Override public void apply(Project project) { + project.pluginManager.apply('elasticsearch.standalone-test') super.apply(project) Map defaultSubstitutions = [ /* These match up with the asciidoc syntax for substitutions but diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy index dc9aa769388..e4f4b35cc78 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy @@ -18,15 +18,29 @@ */ package org.elasticsearch.gradle.test +import org.elasticsearch.gradle.BuildPlugin +import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project -/** A plugin to add rest integration tests. Used for qa projects. */ +/** + * Adds support for starting an Elasticsearch cluster before running integration + * tests. Used in conjunction with {@link StandaloneTestBasePlugin} for qa + * projects and in conjunction with {@link BuildPlugin} for testing the rest + * client. 
+ */ public class RestTestPlugin implements Plugin { + List REQUIRED_PLUGINS = [ + 'elasticsearch.build', + 'elasticsearch.standalone-test'] @Override public void apply(Project project) { - project.pluginManager.apply(StandaloneTestBasePlugin) + if (false == REQUIRED_PLUGINS.any {project.pluginManager.hasPlugin(it)}) { + throw new InvalidUserDataException('elasticsearch.rest-test ' + + 'requires either elasticsearch.build or ' + + 'elasticsearch.standalone-test') + } RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class) integTest.cluster.distribution = 'zip' // rest tests should run with the real zip diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy index db68035e3eb..b9b865cb62d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy @@ -24,6 +24,7 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task @@ -34,6 +35,9 @@ public class StandaloneTestBasePlugin implements Plugin { @Override public void apply(Project project) { + if (project.pluginManager.hasPlugin('elasticsearch.build')) { + throw new InvalidUserDataException('elasticsearch.standalone-test and elasticsearch.build are mutually exclusive') + } project.pluginManager.apply(JavaBasePlugin) project.pluginManager.apply(RandomizedTestingPlugin) @@ -41,7 +45,7 @@ public class StandaloneTestBasePlugin implements Plugin { BuildPlugin.configureRepositories(project) // only setup tests to build - project.sourceSets.maybeCreate('test') + project.sourceSets.create('test') project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") project.eclipse.classpath.sourceSets = [project.sourceSets.test] @@ -49,10 +53,7 @@ public class StandaloneTestBasePlugin implements Plugin { project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]] - Task precommitTask = project.tasks.findByName('precommit') - if (precommitTask == null) { - PrecommitTasks.create(project, false) - project.check.dependsOn(project.precommit) - } + PrecommitTasks.create(project, false) + project.check.dependsOn(project.precommit) } } diff --git a/distribution/build.gradle b/distribution/build.gradle index 2cfd7ebbbce..83f82d9cd6b 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -79,7 +79,7 @@ project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each restTestExpansions['expected.modules.count'] += 1 } -// Integ tests work over the rest http layer, so we need a transport included with the integ test zip. +// Integ tests work over the rest http layer, so we need a transport included with the integ test zip. 
// All transport modules are included so that they may be randomized for testing task buildTransportModules(type: Sync) { into 'build/transport-modules' @@ -104,6 +104,7 @@ subprojects { /***************************************************************************** * Rest test config * *****************************************************************************/ + apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' project.integTest { dependsOn project.assemble @@ -116,7 +117,7 @@ subprojects { mustRunAfter ':distribution:integ-test-zip:integTest#stop' } } - + processTestResources { inputs.properties(project(':distribution').restTestExpansions) MavenFilteringHack.filter(it, project(':distribution').restTestExpansions) diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle index 5347429f03f..6dd165121b7 100644 --- a/qa/backwards-5.0/build.gradle +++ b/qa/backwards-5.0/build.gradle @@ -1,3 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' /* This project runs the core REST tests against a 2 node cluster where one of the nodes has a different minor. diff --git a/qa/smoke-test-client/build.gradle b/qa/smoke-test-client/build.gradle index 260516a5bf6..fca4177d3bb 100644 --- a/qa/smoke-test-client/build.gradle +++ b/qa/smoke-test-client/build.gradle @@ -17,10 +17,11 @@ * under the License. */ +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' // TODO: this test works, but it isn't really a rest test...should we have another plugin for "non rest test that just needs N clusters?" dependencies { testCompile project(path: ':client:transport', configuration: 'runtime') // randomly swapped in as a transport -} \ No newline at end of file +} diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index 0bdacc1d48a..2fb61243fe8 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -17,8 +17,9 @@ * under the License. 
*/ +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' dependencies { testCompile project(path: ':modules:transport-netty4', configuration: 'runtime') // for http -} \ No newline at end of file +} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 4c39d80a674..752e18dc917 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -317,7 +316,7 @@ public class ContextAndHeaderTransportIT extends HttpSmokeTestCase { } @Override - protected boolean apply(String action, ActionRequest request, ActionListener listener) { + protected boolean apply(String action, ActionRequest request, ActionListener listener) { requests.add(new RequestAndHeaders(threadPool.getThreadContext().getHeaders(), request)); return true; } diff --git a/qa/smoke-test-ingest-disabled/build.gradle b/qa/smoke-test-ingest-disabled/build.gradle index 08dfbf8ae7a..1d7491a3517 100644 --- a/qa/smoke-test-ingest-disabled/build.gradle +++ b/qa/smoke-test-ingest-disabled/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' dependencies { diff --git a/qa/smoke-test-ingest-with-all-dependencies/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle index df90bf5b982..e4ac1f29f89 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/build.gradle +++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' dependencies { diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index f39f790ba09..52bf2427463 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -1,4 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' integTest { diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index ab69b02fc8c..a5cf0839639 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -19,6 +19,7 @@ import org.elasticsearch.gradle.MavenFilteringHack +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 @@ -40,4 +41,3 @@ processTestResources { inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) } - diff --git a/qa/smoke-test-reindex-with-painless/build.gradle b/qa/smoke-test-reindex-with-painless/build.gradle index c857db85bfa..d9921d85d9a 100644 --- a/qa/smoke-test-reindex-with-painless/build.gradle +++ b/qa/smoke-test-reindex-with-painless/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' integTest { diff --git a/qa/smoke-test-tribe-node/build.gradle b/qa/smoke-test-tribe-node/build.gradle index 6e108e87043..36b0a6ecd1c 100644 --- a/qa/smoke-test-tribe-node/build.gradle +++ b/qa/smoke-test-tribe-node/build.gradle @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.ClusterFormationTasks import org.elasticsearch.gradle.test.NodeInfo +apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.rest-test' List oneNodes From 232af512f4de3861e197d7059b2019263b7c0246 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 4 Jan 2017 14:26:55 -0500 Subject: [PATCH 094/119] Switch from standalone-test to standalone-rest-test standalone-rest-test doesn't configure unit tests and for these integ test only tests, that is what we want. 
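Spelled out, the contract after this change permits exactly two plugin combinations; the snippet below is illustrative rather than taken from any single project:

[source,groovy]
--------------------------------------------------
// a qa project: REST integration tests only, no main sources or unit tests
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

// a full project (such as the high level rest client): main sources plus REST tests
apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.rest-test'

// rejected with InvalidUserDataException: the two base plugins are mutually exclusive
apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.standalone-rest-test'
--------------------------------------------------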
--- .../elasticsearch/gradle/BuildPlugin.groovy | 6 ++++-- .../gradle/doc/DocsTestPlugin.groovy | 2 +- .../gradle/test/RestTestPlugin.groovy | 4 ++-- ...groovy => StandaloneRestTestPlugin.groovy} | 12 ++++++++--- .../gradle/test/StandaloneTestPlugin.groovy | 7 +++++-- ...sticsearch.standalone-rest-test.properties | 20 +++++++++++++++++++ distribution/build.gradle | 4 ++-- qa/backwards-5.0/build.gradle | 2 +- qa/evil-tests/build.gradle | 2 +- qa/no-bootstrap-tests/build.gradle | 1 - qa/smoke-test-client/build.gradle | 2 +- qa/smoke-test-http/build.gradle | 2 +- qa/smoke-test-ingest-disabled/build.gradle | 2 +- .../build.gradle | 2 +- qa/smoke-test-multinode/build.gradle | 2 +- qa/smoke-test-plugins/build.gradle | 2 +- .../build.gradle | 2 +- qa/smoke-test-tribe-node/build.gradle | 2 +- 18 files changed, 53 insertions(+), 23 deletions(-) rename buildSrc/src/main/groovy/org/elasticsearch/gradle/test/{StandaloneTestBasePlugin.groovy => StandaloneRestTestPlugin.groovy} (85%) create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 1cbbe0ac26d..01bab85b019 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -55,8 +55,10 @@ class BuildPlugin implements Plugin { @Override void apply(Project project) { - if (project.pluginManager.hasPlugin('elasticsearch.standalone-test')) { - throw new InvalidUserDataException('elasticsearch.standalone-test and elasticsearch.build are mutually exclusive') + if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) { + throw new InvalidUserDataException('elasticsearch.standalone-test, ' + + 'elasticearch.standalone-rest-test, and elasticsearch.build ' + + 'are mutually exclusive') } project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index bb56360645f..66f9f0d4c4e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -30,7 +30,7 @@ public class DocsTestPlugin extends RestTestPlugin { @Override public void apply(Project project) { - project.pluginManager.apply('elasticsearch.standalone-test') + project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) Map defaultSubstitutions = [ /* These match up with the asciidoc syntax for substitutions but diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy index e4f4b35cc78..176b02cf9b0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy @@ -25,14 +25,14 @@ import org.gradle.api.Project /** * Adds support for starting an Elasticsearch cluster before running integration - * tests. Used in conjunction with {@link StandaloneTestBasePlugin} for qa + * tests. 
Used in conjunction with {@link StandaloneRestTestPlugin} for qa * projects and in conjunction with {@link BuildPlugin} for testing the rest * client. */ public class RestTestPlugin implements Plugin { List REQUIRED_PLUGINS = [ 'elasticsearch.build', - 'elasticsearch.standalone-test'] + 'elasticsearch.standalone-rest-test'] @Override public void apply(Project project) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy similarity index 85% rename from buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy rename to buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index b9b865cb62d..6e017671017 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -30,13 +30,19 @@ import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.plugins.JavaBasePlugin -/** Configures the build to have a rest integration test. */ -public class StandaloneTestBasePlugin implements Plugin { +/** + * Configures the build to compile tests against Elasticsearch's test framework + * and run REST tests. Use BuildPlugin if you want to build main code as well + * as tests. + */ +public class StandaloneRestTestPlugin implements Plugin { @Override public void apply(Project project) { if (project.pluginManager.hasPlugin('elasticsearch.build')) { - throw new InvalidUserDataException('elasticsearch.standalone-test and elasticsearch.build are mutually exclusive') + throw new InvalidUserDataException('elasticsearch.standalone-test, ' + + 'elasticsearch.standalone-test, and elasticsearch.build are ' + + 'mutually exclusive') } project.pluginManager.apply(JavaBasePlugin) project.pluginManager.apply(RandomizedTestingPlugin) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index fefd08fe4e5..de52d75c600 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -25,12 +25,15 @@ import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.plugins.JavaBasePlugin -/** A plugin to add tests only. Used for QA tests that run arbitrary unit tests. */ +/** + * Configures the build to compile against Elasticsearch's test framework and + * run integration and unit tests. Use BuildPlugin if you want to build main + * code as well as tests. */ public class StandaloneTestPlugin implements Plugin { @Override public void apply(Project project) { - project.pluginManager.apply(StandaloneTestBasePlugin) + project.pluginManager.apply(StandaloneRestTestPlugin) Map testOptions = [ name: 'test', diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties new file mode 100644 index 00000000000..2daf4dc27c0 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.standalone-rest-test.properties @@ -0,0 +1,20 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +implementation-class=org.elasticsearch.gradle.test.StandaloneRestTestPlugin diff --git a/distribution/build.gradle b/distribution/build.gradle index 83f82d9cd6b..8fddd043229 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -1,5 +1,5 @@ /* - * Licensed to Elasticsearch under one or more contributor + // * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under @@ -104,7 +104,7 @@ subprojects { /***************************************************************************** * Rest test config * *****************************************************************************/ - apply plugin: 'elasticsearch.standalone-test' + apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' project.integTest { dependsOn project.assemble diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle index 6dd165121b7..c5e96757071 100644 --- a/qa/backwards-5.0/build.gradle +++ b/qa/backwards-5.0/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' /* This project runs the core REST tests against a 2 node cluster where one of the nodes has a different minor. diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index cba9334fbca..472fc872616 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -42,7 +42,7 @@ thirdPartyAudit.excludes = [ 'com.google.common.cache.Striped64$Cell', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - + // missing class 'com.ibm.icu.lang.UCharacter', ] diff --git a/qa/no-bootstrap-tests/build.gradle b/qa/no-bootstrap-tests/build.gradle index 16ac5e27693..25731a99dee 100644 --- a/qa/no-bootstrap-tests/build.gradle +++ b/qa/no-bootstrap-tests/build.gradle @@ -23,4 +23,3 @@ */ apply plugin: 'elasticsearch.standalone-test' - diff --git a/qa/smoke-test-client/build.gradle b/qa/smoke-test-client/build.gradle index fca4177d3bb..888d9325242 100644 --- a/qa/smoke-test-client/build.gradle +++ b/qa/smoke-test-client/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' // TODO: this test works, but it isn't really a rest test...should we have another plugin for "non rest test that just needs N clusters?" 
diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index 2fb61243fe8..f394075e0f0 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { diff --git a/qa/smoke-test-ingest-disabled/build.gradle b/qa/smoke-test-ingest-disabled/build.gradle index 1d7491a3517..4c4d9c2da12 100644 --- a/qa/smoke-test-ingest-disabled/build.gradle +++ b/qa/smoke-test-ingest-disabled/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { diff --git a/qa/smoke-test-ingest-with-all-dependencies/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle index e4ac1f29f89..2cfa3af434e 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/build.gradle +++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 52bf2427463..fc196fd52a4 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' integTest { diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index a5cf0839639..6fd722e409c 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -19,7 +19,7 @@ import org.elasticsearch.gradle.MavenFilteringHack -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 diff --git a/qa/smoke-test-reindex-with-painless/build.gradle b/qa/smoke-test-reindex-with-painless/build.gradle index d9921d85d9a..7092c0a7b48 100644 --- a/qa/smoke-test-reindex-with-painless/build.gradle +++ b/qa/smoke-test-reindex-with-painless/build.gradle @@ -17,7 +17,7 @@ * under the License. 
*/ -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' integTest { diff --git a/qa/smoke-test-tribe-node/build.gradle b/qa/smoke-test-tribe-node/build.gradle index 36b0a6ecd1c..94789b17fdb 100644 --- a/qa/smoke-test-tribe-node/build.gradle +++ b/qa/smoke-test-tribe-node/build.gradle @@ -21,7 +21,7 @@ import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.ClusterFormationTasks import org.elasticsearch.gradle.test.NodeInfo -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' List oneNodes From 0a6827a5cc22c892b11ceb78ab456510736a37d1 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 5 Jan 2017 11:29:41 +0100 Subject: [PATCH 095/119] Make RestHighLevelClient non final --- .../main/java/org/elasticsearch/client/RestHighLevelClient.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 4bde0d37e61..58ecc5f9c2d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -48,4 +48,6 @@ public final class RestHighLevelClient { return false; } } + + } From 97f3a9bd79a6b919b79185b8f2b5903758192e83 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 5 Jan 2017 11:31:18 +0100 Subject: [PATCH 096/119] Relax LiveVersionMapTests.testRamBytesUsed. With Java9's new restrictions we cannot compute ram usage as accurately as before. See https://issues.apache.org/jira/browse/LUCENE-7595. --- .../org/elasticsearch/index/engine/LiveVersionMapTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index ed80b98c7f9..a84f78ca3d9 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -36,8 +36,8 @@ public class LiveVersionMapTests extends ESTestCase { } long actualRamBytesUsed = RamUsageTester.sizeOf(map); long estimatedRamBytesUsed = map.ramBytesUsed(); - // less than 25% off - assertEquals(actualRamBytesUsed, estimatedRamBytesUsed, actualRamBytesUsed / 4); + // less than 50% off + assertEquals(actualRamBytesUsed, estimatedRamBytesUsed, actualRamBytesUsed / 2); // now refresh map.beforeRefresh(); From ccc4e414ff2ff8f0e1a45c2144fafa6733fddc72 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 5 Jan 2017 17:15:34 +0100 Subject: [PATCH 097/119] remove double slash from license header --- distribution/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distribution/build.gradle b/distribution/build.gradle index 8fddd043229..7c01acc1c91 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -1,5 +1,5 @@ /* - // * Licensed to Elasticsearch under one or more contributor + * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. 
Elasticsearch licenses this file to you under From cfc106d7213fbdfa78d2c74387586ce2c02d3842 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 5 Jan 2017 18:11:58 +0100 Subject: [PATCH 098/119] Don't close store under CancellableThreads (#22434) #22325 changed the recovery retry logic to use unique recovery ids. The change also introduced an issue, however, which made it possible for the shard store to be closed under CancellableThreads, triggering assertions in the node locking logic. This commit limits the use of CancellableThreads only to the part where we wait on the old recovery target to be closed. --- .../recovery/RecoveriesCollection.java | 24 +++++++------------ .../indices/recovery/RecoveryTarget.java | 15 ++++++++---- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 3bee3febf3f..aed23256108 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -107,25 +107,17 @@ public class RecoveriesCollection { } // Closes the current recovery target - final AtomicBoolean successfulReset = new AtomicBoolean(); - try { - final RecoveryTarget finalOldRecoveryTarget = oldRecoveryTarget; - newRecoveryTarget.CancellableThreads().executeIO(() -> successfulReset.set(finalOldRecoveryTarget.resetRecovery())); - } catch (CancellableThreads.ExecutionCancelledException e) { - // new recovery target is already cancelled (probably due to shard closing or recovery source changing) - assert onGoingRecoveries.containsKey(newRecoveryTarget.recoveryId()) == false; - logger.trace("{} recovery reset cancelled, recovery from {}, id [{}], previous id [{}]", newRecoveryTarget.shardId(), - newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), oldRecoveryTarget.recoveryId()); - oldRecoveryTarget.cancel("recovery reset cancelled"); // if finalOldRecoveryTarget.resetRecovery did not even get to execute - return null; - } - if (successfulReset.get() == false) { - cancelRecovery(newRecoveryTarget.recoveryId(), "failed to reset recovery"); - return null; - } else { + boolean successfulReset = oldRecoveryTarget.resetRecovery(newRecoveryTarget.CancellableThreads()); + if (successfulReset) { logger.trace("{} restarted recovery from {}, id [{}], previous id [{}]", newRecoveryTarget.shardId(), newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), oldRecoveryTarget.recoveryId()); return newRecoveryTarget; + } else { + logger.trace("{} recovery could not be reset as it is already cancelled, recovery from {}, id [{}], previous id [{}]", + newRecoveryTarget.shardId(), newRecoveryTarget.sourceNode(), newRecoveryTarget.recoveryId(), + oldRecoveryTarget.recoveryId()); + cancelRecovery(newRecoveryTarget.recoveryId(), "recovery cancelled during reset"); + return null; } } catch (Exception e) { // fail shard to be safe diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 4311d3b2ab1..00dd019aac4 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; @@ -56,6 +57,8 @@ import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -182,17 +185,21 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget * Closes the current recovery target and waits up to a certain timeout for resources to be freed. * Returns true if resetting the recovery was successful, false if the recovery target is already cancelled / failed or marked as done. */ - boolean resetRecovery() throws InterruptedException, IOException { + boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOException { if (finished.compareAndSet(false, true)) { try { - // yes, this is just a logger call in a try-finally block. The reason for this is that resetRecovery is called from - // CancellableThreads and we have to make sure that all references to IndexShard are cleaned up before exiting this method logger.debug("reset of recovery with shard {} and id [{}]", shardId, recoveryId); } finally { // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now. decRef(); } - closedLatch.await(); + try { + newTargetCancellableThreads.execute(closedLatch::await); + } catch (CancellableThreads.ExecutionCancelledException e) { + logger.trace("new recovery target cancelled for shard {} while waiting on old recovery target with id [{}] to close", + shardId, recoveryId); + return false; + } RecoveryState.Stage stage = indexShard.recoveryState().getStage(); if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) { // once primary relocation has moved past the finalization step, the relocation source can be moved to RELOCATED state From 182e8115ded245a15cedfdedc9c3966c62f92741 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 5 Jan 2017 19:12:21 +0100 Subject: [PATCH 099/119] [TEST] Fix IndexRecoveryIT.testDisconnectsDuringRecovery The test currently checks that the recovering shard is not failed when it is not a primary relocation that has moved past the finalization step. Checking if it has moved past that step is done by intercepting the request between the replication source and the target and checking if it has seen then WAIT_FOR_CLUSTERSTATE action as this is the next action that is called after finalization. This action can, however, occur only after the shard was already failed, and thus trip the assertion. This commit changes the check to look out for the FINALIZE action, independently of whether it succeeded or not. 
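The hand-off described in the previous commit is compact enough to isolate: only the wait on the old target's closing runs under the new target's `CancellableThreads`, while the store itself is closed outside any cancellable scope. A stripped-down sketch with illustrative names, mirroring the `closedLatch` wiring in the RecoveryTarget diff above:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.util.CancellableThreads;

import java.util.concurrent.CountDownLatch;

CancellableThreads newTargetThreads = new CancellableThreads();
CountDownLatch oldTargetClosed = new CountDownLatch(1); // counted down once the old target frees its resources

boolean reset;
try {
    // interruptible wait that aborts promptly if the new recovery target is cancelled
    newTargetThreads.execute(oldTargetClosed::await);
    reset = true;
} catch (CancellableThreads.ExecutionCancelledException e) {
    // cancelled while waiting on the old recovery target: give up the reset
    reset = false;
}
--------------------------------------------------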
--- .../elasticsearch/indices/recovery/IndexRecoveryIT.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 6bbccb4cfb7..424edb42e68 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -724,14 +724,14 @@ public class IndexRecoveryIT extends ESIntegTestCase { } }); - final AtomicBoolean seenWaitForClusterState = new AtomicBoolean(); + final AtomicBoolean finalized = new AtomicBoolean(); blueMockTransportService.addDelegate(redMockTransportService, new MockTransportService.DelegateTransport(blueMockTransportService.original()) { @Override protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { logger.info("--> sending request {} on {}", action, connection.getNode()); - if (action.equals(PeerRecoveryTargetService.Actions.WAIT_CLUSTERSTATE)) { - seenWaitForClusterState.set(true); + if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE)) { + finalized.set(true); } super.sendRequest(connection, requestId, action, request, options); } @@ -743,7 +743,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { logger.info("--> sending request {} on {}", action, connection.getNode()); - if (primaryRelocation == false || seenWaitForClusterState.get() == false) { + if ((primaryRelocation && finalized.get()) == false) { assertNotEquals(action, ShardStateAction.SHARD_FAILED_ACTION_NAME); } super.sendRequest(connection, requestId, action, request, options); From 93947923925db3db0a09edea0cf34bf8934a6c59 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 3 Jan 2017 22:48:59 +0100 Subject: [PATCH 100/119] remove unused ParseFieldMatcher imports/arguments --- .../action/search/SearchRequest.java | 1 - .../BlobStoreIndexShardSnapshot.java | 3 --- .../BlobStoreIndexShardSnapshots.java | 2 -- .../bucket/range/RangeAggregationBuilder.java | 2 +- .../bucket/range/RangeAggregator.java | 3 +-- .../date/DateRangeAggregationBuilder.java | 2 +- .../GeoDistanceAggregationBuilder.java | 4 +--- .../range/ip/IpRangeAggregationBuilder.java | 4 +--- .../bucket/terms/support/IncludeExclude.java | 3 --- .../MovAvgPipelineAggregationBuilder.java | 2 +- .../pipeline/movavg/models/EwmaModel.java | 4 +--- .../movavg/models/HoltLinearModel.java | 4 +--- .../movavg/models/HoltWintersModel.java | 9 +++------ .../pipeline/movavg/models/LinearModel.java | 4 +--- .../pipeline/movavg/models/MovAvgModel.java | 4 +--- .../pipeline/movavg/models/SimpleModel.java | 4 +--- .../internal/FilteredSearchContext.java | 1 - .../search/sort/GeoDistanceSortBuilder.java | 2 -- .../search/suggest/SuggestBuilder.java | 2 -- .../search/suggest/SuggestionBuilder.java | 2 -- .../search/suggest/phrase/SmoothingModel.java | 2 -- .../suggest/term/TermSuggestionBuilder.java | 2 -- .../elasticsearch/snapshots/SnapshotInfo.java | 1 - .../pipeline/moving/avg/MovAvgUnitTests.java | 5 ++--- .../suggest/CustomSuggesterSearchIT.java | 2 -- .../matrix/stats/MatrixStatsParser.java | 3 +-- .../support/MultiValuesSourceParser.java | 20 +++++++------------ .../TransportMultiPercolateAction.java | 3 --- 
.../percolator/TransportPercolateAction.java | 2 -- 29 files changed, 24 insertions(+), 78 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 3e65bda8ddf..9c69f1a763f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index e5f04f02032..37b728d43d6 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -23,7 +23,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; @@ -475,7 +474,6 @@ public class BlobStoreIndexShardSnapshot implements ToXContent { private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files"); private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size"); private static final ParseField PARSE_FILES = new ParseField("files"); - private static final ParseFieldMatcher parseFieldMatcher = ParseFieldMatcher.EMPTY; /** * Serializes shard snapshot metadata info into JSON @@ -559,5 +557,4 @@ public class BlobStoreIndexShardSnapshot implements ToXContent { return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), startTime, time, numberOfFiles, totalSize); } - } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 186186ff79b..359c3165f53 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.snapshots.blobstore; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -153,7 +152,6 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To static final ParseField FILES = new ParseField("files"); static final ParseField SNAPSHOTS = new ParseField("snapshots"); } - private static final ParseFieldMatcher parseFieldMatcher = 
ParseFieldMatcher.EMPTY; /** * Writes index file for the shard in the following format. diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index 8f4f9d6ccd8..e7f0a8a6d80 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -57,7 +57,7 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder(parseArrayToSet(parser)), null); } else if (token == XContentParser.Token.START_OBJECT) { - ParseFieldMatcher parseFieldMatcher = context.getParseFieldMatcher(); String currentFieldName = null; Integer partition = null, numPartitions = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java index 30db30fcafd..bc973ad442f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/MovAvgPipelineAggregationBuilder.java @@ -406,7 +406,7 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio MovAvgModel.AbstractModelParser modelParser = movingAverageMdelParserRegistry.lookup(model, parser.getTokenLocation()); MovAvgModel movAvgModel; try { - movAvgModel = modelParser.parse(settings, pipelineAggregatorName, factory.window(), context.getParseFieldMatcher()); + movAvgModel = modelParser.parse(settings, pipelineAggregatorName, factory.window()); } catch (ParseException exception) { throw new ParsingException(parser.getTokenLocation(), "Could not parse settings for model [" + model + "].", exception); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java index c7e6b0e8980..26fb0333b18 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/EwmaModel.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.pipeline.movavg.models; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -127,8 +126,7 @@ public class EwmaModel extends MovAvgModel { public static final AbstractModelParser PARSER = new AbstractModelParser() { @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, - ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize) throws ParseException { double alpha = parseDoubleParam(settings, "alpha", DEFAULT_ALPHA); checkUnrecognizedParams(settings); return new EwmaModel(alpha); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java index d8a591972ec..18193337385 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltLinearModel.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.pipeline.movavg.models; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -191,8 +190,7 @@ public class HoltLinearModel extends MovAvgModel { public static final AbstractModelParser PARSER = new AbstractModelParser() { @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, - ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize) throws ParseException { double alpha = parseDoubleParam(settings, "alpha", DEFAULT_ALPHA); double beta = parseDoubleParam(settings, "beta", DEFAULT_BETA); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java index 3fe2e81ff9c..92b2e4d3ea2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/HoltWintersModel.java @@ -23,7 +23,6 @@ package org.elasticsearch.search.aggregations.pipeline.movavg.models; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -59,11 +58,10 @@ public class HoltWintersModel extends MovAvgModel { * Parse a string SeasonalityType into the byte enum * * @param text SeasonalityType in string format (e.g. 
"add") - * @param parseFieldMatcher Matcher for field names * @return SeasonalityType enum */ @Nullable - public static SeasonalityType parse(String text, ParseFieldMatcher parseFieldMatcher) { + public static SeasonalityType parse(String text) { if (text == null) { return null; } @@ -379,8 +377,7 @@ public class HoltWintersModel extends MovAvgModel { public static final AbstractModelParser PARSER = new AbstractModelParser() { @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, - ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize) throws ParseException { double alpha = parseDoubleParam(settings, "alpha", DEFAULT_ALPHA); double beta = parseDoubleParam(settings, "beta", DEFAULT_BETA); @@ -399,7 +396,7 @@ public class HoltWintersModel extends MovAvgModel { Object value = settings.get("type"); if (value != null) { if (value instanceof String) { - seasonalityType = SeasonalityType.parse((String)value, parseFieldMatcher); + seasonalityType = SeasonalityType.parse((String)value); settings.remove("type"); } else { throw new ParseException("Parameter [type] must be a String, type `" diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java index 089f3a430ca..3eed0bf603b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/LinearModel.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.pipeline.movavg.models; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -106,8 +105,7 @@ public class LinearModel extends MovAvgModel { public static final AbstractModelParser PARSER = new AbstractModelParser() { @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, - ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize) throws ParseException { checkUnrecognizedParams(settings); return new LinearModel(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java index 0837eca38bd..f64117236d6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModel.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.pipeline.movavg.models; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -143,11 +142,10 @@ public abstract class MovAvgModel implements NamedWriteable, ToXContent { * @param settings Map of settings, extracted from the request * @param pipelineName Name of the parent pipeline agg * @param windowSize 
Size of the window for this moving avg - * @param parseFieldMatcher Matcher for field names * @return A fully built moving average model */ public abstract MovAvgModel parse(@Nullable Map settings, String pipelineName, - int windowSize, ParseFieldMatcher parseFieldMatcher) throws ParseException; + int windowSize) throws ParseException; /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java index 14544881883..e30a59d2887 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/SimpleModel.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.pipeline.movavg.models; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -99,8 +98,7 @@ public class SimpleModel extends MovAvgModel { public static final AbstractModelParser PARSER = new AbstractModelParser() { @Override - public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize, - ParseFieldMatcher parseFieldMatcher) throws ParseException { + public MovAvgModel parse(@Nullable Map settings, String pipelineName, int windowSize) throws ParseException { checkUnrecognizedParams(settings); return new SimpleModel(); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 2cf52fb11f5..fb1e2132dee 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 7111cee5766..720637229c9 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -29,7 +29,6 @@ import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance; @@ -398,7 +397,6 @@ public class GeoDistanceSortBuilder extends SortBuilder */ public static GeoDistanceSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { XContentParser parser = context.parser(); - ParseFieldMatcher parseFieldMatcher = context.getParseFieldMatcher(); String fieldName = null; List geoPoints = 
new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java index 84c3da8618e..1de59eb3e26 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.suggest; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -140,7 +139,6 @@ public class SuggestBuilder extends ToXContentToBytes implements Writeable { public static SuggestBuilder fromXContent(QueryParseContext parseContext, Suggesters suggesters) throws IOException { XContentParser parser = parseContext.parser(); - ParseFieldMatcher parseFieldMatcher = parseContext.getParseFieldMatcher(); SuggestBuilder suggestBuilder = new SuggestBuilder(); String fieldName = null; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java index 4f6c4d8c553..2062b696cfa 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionBuilder.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -257,7 +256,6 @@ public abstract class SuggestionBuilder> implemen static SuggestionBuilder fromXContent(QueryParseContext parseContext, Suggesters suggesters) throws IOException { XContentParser parser = parseContext.parser(); - ParseFieldMatcher parsefieldMatcher = parseContext.getParseFieldMatcher(); XContentParser.Token token; String currentFieldName = null; String suggestText = null; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java index 26e0b80064b..82f106130ef 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.suggest.phrase; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContent; @@ -67,7 +66,6 @@ public abstract class SmoothingModel implements NamedWriteable, ToXContent { public static SmoothingModel fromXContent(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); - ParseFieldMatcher parseFieldMatcher = parseContext.getParseFieldMatcher(); XContentParser.Token token; String fieldName = null; SmoothingModel model = null; diff --git 
a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index f7be69e4804..9a7238dbe16 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; import org.apache.lucene.search.spell.StringDistance; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -391,7 +390,6 @@ public class TermSuggestionBuilder extends SuggestionBuilder otherOptions) throws IOException { + Map otherOptions) throws IOException { if (MULTIVALUE_MODE_FIELD.match(currentFieldName)) { if (token == XContentParser.Token.VALUE_STRING) { otherOptions.put(MULTIVALUE_MODE_FIELD, parser.text()); diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java index 8d87d1fcd1c..c2e4b4f0c3f 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java @@ -20,13 +20,12 @@ package org.elasticsearch.search.aggregations.support; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; -import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregationBuilder.CommonFields; +import org.elasticsearch.search.aggregations.Aggregator; import java.io.IOException; import java.util.ArrayList; @@ -85,8 +84,6 @@ public abstract class MultiValuesSourceParser implement String format = null; Map missingMap = null; Map otherOptions = new HashMap<>(); - final ParseFieldMatcher parseFieldMatcher = context.getParseFieldMatcher(); - XContentParser.Token token; String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -101,7 +98,7 @@ public abstract class MultiValuesSourceParser implement throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + "Multi-field aggregations do not support scripts."); - } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) { + } else if (!token(aggregationName, currentFieldName, token, parser, otherOptions)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } @@ -116,7 +113,7 @@ public abstract class MultiValuesSourceParser implement "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. 
" + "Multi-field aggregations do not support scripts."); - } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) { + } else if (!token(aggregationName, currentFieldName, token, parser, otherOptions)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } @@ -135,11 +132,11 @@ public abstract class MultiValuesSourceParser implement "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } } - } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) { + } else if (!token(aggregationName, currentFieldName, token, parser, otherOptions)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } - } else if (!token(aggregationName, currentFieldName, token, parser, context.getParseFieldMatcher(), otherOptions)) { + } else if (!token(aggregationName, currentFieldName, token, parser, otherOptions)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } @@ -198,8 +195,7 @@ public abstract class MultiValuesSourceParser implement * the target type of the final value output by the aggregation * @param otherOptions * a {@link Map} containing the extra options parsed by the - * {@link #token(String, String, org.elasticsearch.common.xcontent.XContentParser.Token, - * XContentParser, ParseFieldMatcher, Map)} + * {@link #token(String, String, XContentParser.Token, XContentParser, Map)} * method * @return the created factory */ @@ -219,8 +215,6 @@ public abstract class MultiValuesSourceParser implement * the current token for the parser * @param parser * the parser - * @param parseFieldMatcher - * the {@link ParseFieldMatcher} to use to match field names * @param otherOptions * a {@link Map} of options to be populated by successive calls * to this method which will then be passed to the @@ -232,5 +226,5 @@ public abstract class MultiValuesSourceParser implement * if an error occurs whilst parsing */ protected abstract boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser, - ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException; + Map otherOptions) throws IOException; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java index 119b8e35bc9..668df18678f 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/TransportMultiPercolateAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -53,7 +52,6 @@ import java.util.Map; public class TransportMultiPercolateAction extends HandledTransportAction { private final Client client; - 
private final ParseFieldMatcher parseFieldMatcher;
 private final SearchRequestParsers searchRequestParsers;
 private final NamedXContentRegistry xContentRegistry;
@@ -66,7 +64,6 @@ public class TransportMultiPercolateAction extends HandledTransportAction {
 private final Client client;
- private final ParseFieldMatcher parseFieldMatcher;
 private final SearchRequestParsers searchRequestParsers;
 private final NamedXContentRegistry xContentRegistry;
@@ -76,7 +75,6 @@ public class TransportPercolateAction extends HandledTransportAction
Date: Tue, 3 Jan 2017 23:26:05 +0100
Subject: [PATCH 101/119] ObjectParser to no longer require ParseFieldMatcherSupplier as its Context

ParseFieldMatcher as well as ParseFieldMatcherSupplier will soon be removed,
hence the ObjectParser's context no longer needs to be a
ParseFieldMatcherSupplier. That will allow us to remove
ParseFieldMatcherSupplier's implementations, little by little.

---
 .../common/xcontent/AbstractObjectParser.java | 3 +--
 .../common/xcontent/ConstructingObjectParser.java | 5 ++---
 .../org/elasticsearch/common/xcontent/ObjectParser.java | 7 +++----
 3 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
index 64a5fa4c119..112ba0debda 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java
@@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcherSupplier;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -34,7 +33,7 @@ import java.util.function.BiFunction;
 /**
  * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared.
  */
-public abstract class AbstractObjectParser
+public abstract class AbstractObjectParser
 implements BiFunction, ContextParser {
 /**
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java
index 6e646094d06..fe606c130d1 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java
@@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParseFieldMatcherSupplier;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
@@ -74,7 +73,7 @@ import java.util.function.Function;
 * Note: if optional constructor arguments aren't specified then the number of allocations is always the worst case.
 *
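 * Editor's note: a minimal, hypothetical sketch of what this patch enables, added for
 * illustration only. SimpleObject, its setter and the "name" field are invented; the
 * declareString and parse(parser, context) calls mirror usages shown elsewhere in this
 * patch series. After the change, the Context type parameter can be any caller-defined
 * type (or simply Void) instead of a ParseFieldMatcherSupplier:
 * <pre>
 * ObjectParser<SimpleObject, Void> parser = new ObjectParser<>("simple_object", SimpleObject::new);
 * parser.declareString(SimpleObject::setName, new ParseField("name"));
 * SimpleObject parsed = parser.parse(xContentParser, null); // no ParseFieldMatcher threaded through
 * </pre>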

*/ -public final class ConstructingObjectParser extends AbstractObjectParser { +public final class ConstructingObjectParser extends AbstractObjectParser { /** * Consumer that marks a field as a required constructor argument instead of a real object field. */ @@ -236,7 +235,7 @@ public final class ConstructingObjectParser extends AbstractObjectParser { +public final class ObjectParser extends AbstractObjectParser { /** * Adapts an array (or varags) setter into a list setter. */ @@ -167,7 +166,7 @@ public final class ObjectParser Date: Tue, 3 Jan 2017 23:45:29 +0100 Subject: [PATCH 102/119] fix unchecked generics warnings in ObjectParser --- .../common/xcontent/ObjectParser.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index 3e63bc71e00..e84890db7c8 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -152,7 +152,7 @@ public final class ObjectParser extends AbstractObjectParser fieldParser = null; + FieldParser fieldParser = null; String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -356,13 +356,13 @@ public final class ObjectParser extends AbstractObjectParser fieldParser, String currentFieldName, Value value, Context context) + private void parseArray(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) throws IOException { assert parser.currentToken() == XContentParser.Token.START_ARRAY : "Token was: " + parser.currentToken(); parseValue(parser, fieldParser, currentFieldName, value, context); } - private void parseValue(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) + private void parseValue(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) throws IOException { try { fieldParser.parser.parse(parser, value, context); @@ -371,7 +371,7 @@ public final class ObjectParser extends AbstractObjectParser fieldParser, String currentFieldName, Value value, Context context) + private void parseSub(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) throws IOException { final XContentParser.Token token = parser.currentToken(); switch (token) { @@ -395,27 +395,27 @@ public final class ObjectParser extends AbstractObjectParser parser = fieldParserMap.get(fieldName); + FieldParser parser = fieldParserMap.get(fieldName); if (parser == null && false == ignoreUnknownFields) { throw new IllegalArgumentException("[" + name + "] unknown field [" + fieldName + "], parser not found"); } return parser; } - public static class FieldParser { - private final Parser parser; + private class FieldParser { + private final Parser parser; private final EnumSet supportedTokens; private final ParseField parseField; private final ValueType type; - public FieldParser(Parser parser, EnumSet supportedTokens, ParseField parseField, ValueType type) { + FieldParser(Parser parser, EnumSet supportedTokens, ParseField parseField, ValueType type) { this.parser = parser; this.supportedTokens = supportedTokens; this.parseField = parseField; this.type = type; } - public void assertSupports(String parserName, XContentParser.Token token, 
String currentFieldName) { + void assertSupports(String parserName, XContentParser.Token token, String currentFieldName) { if (parseField.match(currentFieldName) == false) { throw new IllegalStateException("[" + parserName + "] parsefield doesn't accept: " + currentFieldName); } From 6102523033740a33faf10dc79a3f755b0de81f6f Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:23:24 +0100 Subject: [PATCH 103/119] remove ParseFieldMatcher usages from Script parsing code --- .../action/update/UpdateRequest.java | 3 +-- .../index/query/ScriptQueryBuilder.java | 4 ++-- .../ScriptScoreFunctionBuilder.java | 2 +- .../java/org/elasticsearch/script/Script.java | 24 +++++-------------- .../heuristics/ScriptHeuristic.java | 2 +- .../ScriptedMetricAggregationBuilder.java | 9 ++++--- .../tophits/TopHitsAggregationBuilder.java | 4 ++-- ...ucketScriptPipelineAggregationBuilder.java | 4 ++-- ...ketSelectorPipelineAggregationBuilder.java | 4 ++-- .../support/ValuesSourceParserHelper.java | 13 +++++----- .../search/builder/SearchSourceBuilder.java | 4 ++-- .../search/sort/ScriptSortBuilder.java | 3 ++- .../phrase/PhraseSuggestionBuilder.java | 2 +- .../org/elasticsearch/script/ScriptTests.java | 3 +-- .../script/mustache/TemplateQueryBuilder.java | 2 +- .../mustache/MustacheScriptEngineTests.java | 6 ++--- .../index/reindex/RestReindexAction.java | 4 ++-- 17 files changed, 39 insertions(+), 54 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index a812cb22eb6..d31d0c9ae14 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; @@ -714,7 +713,7 @@ public class UpdateRequest extends InstanceShardOperationRequest if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if ("script".equals(currentFieldName)) { - script = Script.parse(parser, ParseFieldMatcher.EMPTY); + script = Script.parse(parser); } else if ("scripted_upsert".equals(currentFieldName)) { scriptedUpsert = parser.booleanValue(); } else if ("upsert".equals(currentFieldName)) { diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 077a687ed83..526fc2b69ce 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -100,7 +100,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder // skip } else if (token == XContentParser.Token.START_OBJECT) { if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { - script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); + script = Script.parse(parser, parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not 
support [" + currentFieldName + "]"); } @@ -110,7 +110,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName)) { boost = parser.floatValue(); } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { - script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); + script = Script.parse(parser, parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index b1fee2d6252..68913cd9e21 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -112,7 +112,7 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder PARSER = new ObjectParser<>("script", Builder::new); + private static final ObjectParser PARSER = new ObjectParser<>("script", Builder::new); static { // Defines the fields necessary to parse a Script as XContent using an ObjectParser. @@ -224,19 +221,11 @@ public final class Script implements ToXContent, Writeable { } /** - * Convenience method to call {@link Script#parse(XContentParser, ParseFieldMatcher, String)} + * Convenience method to call {@link Script#parse(XContentParser, String)} * using the default scripting language. */ - public static Script parse(XContentParser parser, ParseFieldMatcher matcher) throws IOException { - return parse(parser, matcher, DEFAULT_SCRIPT_LANG); - } - - /** - * Convenience method to call {@link Script#parse(XContentParser, ParseFieldMatcher, String)} using the - * {@link ParseFieldMatcher} and scripting language provided by the {@link QueryParseContext}. - */ - public static Script parse(XContentParser parser, QueryParseContext context) throws IOException { - return parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + public static Script parse(XContentParser parser) throws IOException { + return parse(parser, DEFAULT_SCRIPT_LANG); } /** @@ -300,13 +289,12 @@ public final class Script implements ToXContent, Writeable { * } * * @param parser The {@link XContentParser} to be used. - * @param matcher The {@link ParseFieldMatcher} to be used. * @param defaultLang The default language to use if no language is specified. The default language isn't necessarily * the one defined by {@link Script#DEFAULT_SCRIPT_LANG} due to backwards compatiblity requirements * related to stored queries using previously default languauges. * @return The parsed {@link Script}. 
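 *
 * Editor's note: a hypothetical usage sketch, not part of the original Javadoc. Assuming
 * the parser is positioned on a script value, a caller of the new signature might write:
 * <pre>
 * Script script = Script.parse(parser, Script.DEFAULT_SCRIPT_LANG);
 * // or, equivalently for the default language: Script.parse(parser)
 * </pre>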
*/ - public static Script parse(XContentParser parser, ParseFieldMatcher matcher, String defaultLang) throws IOException { + public static Script parse(XContentParser parser, String defaultLang) throws IOException { Objects.requireNonNull(defaultLang); Token token = parser.currentToken(); @@ -319,7 +307,7 @@ public final class Script implements ToXContent, Writeable { return new Script(ScriptType.INLINE, defaultLang, parser.text(), Collections.emptyMap()); } - return PARSER.apply(parser, () -> matcher).build(defaultLang); + return PARSER.apply(parser, null).build(defaultLang); } private final ScriptType type; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index b49ca9fe1c9..083733f31ae 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -158,7 +158,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { currentFieldName = parser.currentName(); } else { if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { - script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + script = Script.parse(parser, context.getDefaultScriptLanguage()); } else { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. unknown object [{}]", heuristicName, currentFieldName); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 6b4d1bc1b8f..68a5138271f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -38,7 +38,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Objects; @@ -255,13 +254,13 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.VALUE_STRING) { if (INIT_SCRIPT_FIELD.match(currentFieldName)) { - initScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + initScript = Script.parse(parser, context.getDefaultScriptLanguage()); } else if (MAP_SCRIPT_FIELD.match(currentFieldName)) { - mapScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + mapScript = Script.parse(parser, context.getDefaultScriptLanguage()); } else if (COMBINE_SCRIPT_FIELD.match(currentFieldName)) { - combineScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + combineScript = Script.parse(parser, context.getDefaultScriptLanguage()); } else if (REDUCE_SCRIPT_FIELD.match(currentFieldName)) { - reduceScript = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + reduceScript = 
Script.parse(parser, context.getDefaultScriptLanguage()); } else if (token == XContentParser.Token.START_OBJECT && PARAMS_FIELD.match(currentFieldName)) { params = parser.map(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 7cfc0cf61c1..978060632dc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -642,7 +642,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder map = parser.map(); bucketsPathsMap = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java index 8a88f011756..078f50a978f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java @@ -141,7 +141,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { - script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + script = Script.parse(parser, context.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -163,7 +163,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg } } else if (token == XContentParser.Token.START_OBJECT) { if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName)) { - script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + script = Script.parse(parser, context.getDefaultScriptLanguage()); } else if (BUCKETS_PATH.match(currentFieldName)) { Map map = parser.map(); bucketsPathsMap = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java index 2af21192a44..7b174d789f4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java @@ -35,30 +35,30 @@ public final class ValuesSourceParserHelper { public static void declareAnyFields( ObjectParser, QueryParseContext> objectParser, boolean scriptable, boolean formattable) { - declareFields(objectParser, scriptable, formattable, false, ValuesSourceType.ANY, null); + declareFields(objectParser, scriptable, formattable, false, null); } public static void declareNumericFields( ObjectParser, QueryParseContext> objectParser, boolean scriptable, boolean formattable, boolean timezoneAware) { - declareFields(objectParser, scriptable, formattable, timezoneAware, ValuesSourceType.NUMERIC, 
ValueType.NUMERIC); + declareFields(objectParser, scriptable, formattable, timezoneAware, ValueType.NUMERIC); } public static void declareBytesFields( ObjectParser, QueryParseContext> objectParser, boolean scriptable, boolean formattable) { - declareFields(objectParser, scriptable, formattable, false, ValuesSourceType.BYTES, ValueType.STRING); + declareFields(objectParser, scriptable, formattable, false, ValueType.STRING); } public static void declareGeoFields( ObjectParser, QueryParseContext> objectParser, boolean scriptable, boolean formattable) { - declareFields(objectParser, scriptable, formattable, false, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); + declareFields(objectParser, scriptable, formattable, false, ValueType.GEOPOINT); } private static void declareFields( ObjectParser, QueryParseContext> objectParser, - boolean scriptable, boolean formattable, boolean timezoneAware, ValuesSourceType valuesSourceType, ValueType targetValueType) { + boolean scriptable, boolean formattable, boolean timezoneAware, ValueType targetValueType) { objectParser.declareField(ValuesSourceAggregationBuilder::field, XContentParser::text, @@ -84,7 +84,8 @@ public final class ValuesSourceParserHelper { } if (scriptable) { - objectParser.declareField(ValuesSourceAggregationBuilder::script, org.elasticsearch.script.Script::parse, + objectParser.declareField(ValuesSourceAggregationBuilder::script, + (parser, context) -> Script.parse(parser, context.getDefaultScriptLanguage()), Script.SCRIPT_PARSE_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING); } diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 7ecd06c8c83..35c6488136b 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -1341,7 +1341,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ currentFieldName = parser.currentName(); } else if (token.isValue()) { if (SCRIPT_FIELD.match(currentFieldName)) { - script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + script = Script.parse(parser, context.getDefaultScriptLanguage()); } else if (IGNORE_FAILURE_FIELD.match(currentFieldName)) { ignoreFailure = parser.booleanValue(); } else { @@ -1350,7 +1350,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } } else if (token == XContentParser.Token.START_OBJECT) { if (SCRIPT_FIELD.match(currentFieldName)) { - script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + script = Script.parse(parser, context.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 6da93b26b8c..f284597e782 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -216,7 +216,8 @@ public class ScriptSortBuilder extends SortBuilder { a -> new ScriptSortBuilder((Script) a[0], (ScriptSortType) a[1])); static { - PARSER.declareField(constructorArg(), Script::parse, Script.SCRIPT_PARSE_FIELD, 
ValueType.OBJECT_OR_STRING); + PARSER.declareField(constructorArg(), (parser, context) -> Script.parse(parser, context.getDefaultScriptLanguage()), + Script.SCRIPT_PARSE_FIELD, ValueType.OBJECT_OR_STRING); PARSER.declareField(constructorArg(), p -> ScriptSortType.fromString(p.text()), TYPE_FIELD, ValueType.STRING); PARSER.declareString((b, v) -> b.order(SortOrder.fromString(v)), ORDER_FIELD); PARSER.declareString((b, v) -> b.sortMode(SortMode.fromString(v)), SORTMODE_FIELD); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 20d4f6853c0..12d2af289a9 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -567,7 +567,7 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder s.versionType(VersionType.fromString(i)), new ParseField("version_type")); - PARSER.declareField((p, v, c) -> sourceParser.parse(p, v, c), new ParseField("source"), ValueType.OBJECT); + PARSER.declareField(sourceParser::parse, new ParseField("source"), ValueType.OBJECT); PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), c), new ParseField("dest"), ValueType.OBJECT); PARSER.declareInt(ReindexRequest::setSize, new ParseField("size")); - PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p, c.getParseFieldMatcher())), new ParseField("script"), + PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p)), new ParseField("script"), ValueType.OBJECT); PARSER.declareString(ReindexRequest::setConflicts, new ParseField("conflicts")); } From dea7d6543958899d08503e722febf8512348321b Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:29:09 +0100 Subject: [PATCH 104/119] remove ParseFieldMatcher usages from RestSearchTemplateAction --- .../script/mustache/RestSearchTemplateAction.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index a6d705386fc..192cdc6a463 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -23,8 +23,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -47,7 +45,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestSearchTemplateAction extends BaseRestHandler { - private static final ObjectParser PARSER; + private static final ObjectParser PARSER; static { PARSER = new ObjectParser<>("search_template"); PARSER.declareField((parser, request, s) -> @@ -105,7 +103,7 @@ public class RestSearchTemplateAction extends BaseRestHandler { // Creates the search template request SearchTemplateRequest searchTemplateRequest; try (XContentParser parser = 
request.contentOrSourceParamParser()) { - searchTemplateRequest = PARSER.parse(parser, new SearchTemplateRequest(), () -> ParseFieldMatcher.EMPTY); + searchTemplateRequest = PARSER.parse(parser, new SearchTemplateRequest(), null); } searchTemplateRequest.setRequest(searchRequest); @@ -113,6 +111,6 @@ public class RestSearchTemplateAction extends BaseRestHandler { } public static SearchTemplateRequest parse(XContentParser parser) throws IOException { - return PARSER.parse(parser, new SearchTemplateRequest(), () -> ParseFieldMatcher.EMPTY); + return PARSER.parse(parser, new SearchTemplateRequest(), null); } } From 723bdc45499daf7034a85570841565df67810955 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:32:53 +0100 Subject: [PATCH 105/119] remove ParseFieldMatcher usages from FetchSourceContext --- .../java/org/elasticsearch/action/bulk/BulkRequest.java | 4 ++-- .../org/elasticsearch/action/update/UpdateRequest.java | 2 +- .../org/elasticsearch/index/query/InnerHitBuilder.java | 2 +- .../metrics/tophits/TopHitsAggregationBuilder.java | 6 +++--- .../elasticsearch/search/builder/SearchSourceBuilder.java | 6 +++--- .../search/fetch/subphase/FetchSourceContext.java | 7 +------ 6 files changed, 11 insertions(+), 16 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 2ce9f4d47c4..20d5e64f49a 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -349,7 +349,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques } else if ("fields".equals(currentFieldName)) { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected"); } else if ("_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(parser); + fetchSourceContext = FetchSourceContext.fromXContent(parser); } else { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]"); } @@ -362,7 +362,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } } else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(parser); + fetchSourceContext = FetchSourceContext.fromXContent(parser); } else if (token != XContentParser.Token.VALUE_NULL) { throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index d31d0c9ae14..0d2801e44a0 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -739,7 +739,7 @@ public class UpdateRequest extends InstanceShardOperationRequest fields(fields.toArray(new String[fields.size()])); } } else if ("_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(parser); + fetchSourceContext = 
FetchSourceContext.fromXContent(parser); } } if (script != null) { diff --git a/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 5bd7b20cb29..9ef9f2998b0 100644 --- a/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -96,7 +96,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl ObjectParser.ValueType.OBJECT_ARRAY); PARSER.declareField((p, i, c) -> { try { - i.setFetchSourceContext(FetchSourceContext.parse(c.parser())); + i.setFetchSourceContext(FetchSourceContext.fromXContent(c.parser())); } catch (IOException e) { throw new ParsingException(p.getTokenLocation(), "Could not parse inner _source definition", e); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 978060632dc..68932d65b16 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -616,7 +616,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder scriptFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -699,7 +699,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> sorts = SortBuilder.fromXContent(context); factory.sorts(sorts); } else if (SearchSourceBuilder._SOURCE_FIELD.match(currentFieldName)) { - factory.fetchSource(FetchSourceContext.parse(context.parser())); + factory.fetchSource(FetchSourceContext.fromXContent(context.parser())); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 35c6488136b..99cb5a5e6b3 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -948,7 +948,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } else if (TRACK_SCORES_FIELD.match(currentFieldName)) { trackScores = parser.booleanValue(); } else if (_SOURCE_FIELD.match(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(context.parser()); + fetchSourceContext = FetchSourceContext.fromXContent(context.parser()); } else if (STORED_FIELDS_FIELD.match(currentFieldName)) { storedFieldsContext = StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context); @@ -970,7 +970,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } else if (POST_FILTER_FIELD.match(currentFieldName)) { postQueryBuilder = context.parseInnerQueryBuilder(); } else if (_SOURCE_FIELD.match(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(context.parser()); + fetchSourceContext = FetchSourceContext.fromXContent(context.parser()); } else if (SCRIPT_FIELDS_FIELD.match(currentFieldName)) { scriptFields = new ArrayList<>(); while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { @@ -1059,7 +1059,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } } } else if (_SOURCE_FIELD.match(currentFieldName)) { - fetchSourceContext = FetchSourceContext.parse(context.parser()); + fetchSourceContext = FetchSourceContext.fromXContent(context.parser()); } else if (SEARCH_AFTER.match(currentFieldName)) { searchAfterBuilder = SearchAfterBuilder.fromXContent(parser, context.getParseFieldMatcher()); } else if (FIELDS_FIELD.match(currentFieldName)) { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java index 55197b91064..f3e8bab93fa 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.fetch.subphase; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -55,10 +54,6 @@ public class FetchSourceContext implements Writeable, ToXContent { private final String[] excludes; private Function, Map> filter; - public static FetchSourceContext parse(XContentParser parser) throws IOException { - return fromXContent(parser, ParseFieldMatcher.STRICT); - } - public FetchSourceContext(boolean fetchSource, String[] includes, String[] excludes) { this.fetchSource = fetchSource; this.includes = includes == null ? Strings.EMPTY_ARRAY : includes; @@ -127,7 +122,7 @@ public class FetchSourceContext implements Writeable, ToXContent { return null; } - public static FetchSourceContext fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public static FetchSourceContext fromXContent(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); boolean fetchSource = true; String[] includes = Strings.EMPTY_ARRAY; From d87a30647b501b2fa7d99afb75f718c0ccbcd16f Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:45:23 +0100 Subject: [PATCH 106/119] remove ParseFieldMatcher usages from SearchAfterBuilder --- .../org/elasticsearch/search/builder/SearchSourceBuilder.java | 2 +- .../elasticsearch/search/searchafter/SearchAfterBuilder.java | 3 +-- .../search/searchafter/SearchAfterBuilderTests.java | 4 ++-- .../elasticsearch/search/RandomSearchRequestGenerator.java | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 99cb5a5e6b3..520b92692fa 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -1061,7 +1061,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } else if (_SOURCE_FIELD.match(currentFieldName)) { fetchSourceContext = FetchSourceContext.fromXContent(context.parser()); } else if (SEARCH_AFTER.match(currentFieldName)) { - searchAfterBuilder = SearchAfterBuilder.fromXContent(parser, context.getParseFieldMatcher()); + searchAfterBuilder = SearchAfterBuilder.fromXContent(parser); } else if 
(FIELDS_FIELD.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "The field [" + SearchSourceBuilder.FIELDS_FIELD + "] is no longer supported, please use [" + diff --git a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java index b9a6ca9be57..bc73ad7925b 100644 --- a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.SortField; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -202,7 +201,7 @@ public class SearchAfterBuilder implements ToXContent, Writeable { builder.array(SEARCH_AFTER.getPreferredName(), sortValues); } - public static SearchAfterBuilder fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public static SearchAfterBuilder fromXContent(XContentParser parser) throws IOException { SearchAfterBuilder builder = new SearchAfterBuilder(); XContentParser.Token token = parser.currentToken(); List values = new ArrayList<> (); diff --git a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java index 93663d93683..8e372d95236 100644 --- a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java @@ -133,7 +133,7 @@ public class SearchAfterBuilderTests extends ESTestCase { parser.nextToken(); parser.nextToken(); parser.nextToken(); - return SearchAfterBuilder.fromXContent(parser, null); + return SearchAfterBuilder.fromXContent(parser); } private static SearchAfterBuilder serializedCopy(SearchAfterBuilder original) throws IOException { @@ -172,7 +172,7 @@ public class SearchAfterBuilderTests extends ESTestCase { parser.nextToken(); parser.nextToken(); parser.nextToken(); - SearchAfterBuilder secondSearchAfterBuilder = SearchAfterBuilder.fromXContent(parser, null); + SearchAfterBuilder secondSearchAfterBuilder = SearchAfterBuilder.fromXContent(parser); assertNotSame(searchAfterBuilder, secondSearchAfterBuilder); assertEquals(searchAfterBuilder, secondSearchAfterBuilder); assertEquals(searchAfterBuilder.hashCode(), secondSearchAfterBuilder.hashCode()); diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index db00d480a8a..fc240e6b555 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -302,7 +302,7 @@ public class RandomSearchRequestGenerator { parser.nextToken(); parser.nextToken(); parser.nextToken(); - builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues()); + builder.searchAfter(SearchAfterBuilder.fromXContent(parser).getSortValues()); } catch (IOException e) { throw new RuntimeException("Error building 
search_from", e); } From d60e9bddd058e3dd3efd7ce09c858c3b8d69d413 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:54:42 +0100 Subject: [PATCH 107/119] remove ParseFieldMatcher usages from IndexGraveyard --- .../cluster/metadata/IndexGraveyard.java | 17 ++++++++--------- .../java/org/elasticsearch/index/Index.java | 10 ++-------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index 722af7f8750..d60617ea642 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -22,8 +22,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -70,7 +68,7 @@ public final class IndexGraveyard implements MetaData.Custom { public static final String TYPE = "index-graveyard"; private static final ParseField TOMBSTONES_FIELD = new ParseField("tombstones"); - private static final ObjectParser, ParseFieldMatcherSupplier> GRAVEYARD_PARSER; + private static final ObjectParser, Void> GRAVEYARD_PARSER; static { GRAVEYARD_PARSER = new ObjectParser<>("index_graveyard", ArrayList::new); GRAVEYARD_PARSER.declareObjectArray(List::addAll, Tombstone.getParser(), TOMBSTONES_FIELD); @@ -141,7 +139,7 @@ public final class IndexGraveyard implements MetaData.Custom { } public static IndexGraveyard fromXContent(final XContentParser parser) throws IOException { - return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT)); + return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, null)); } @Override @@ -354,16 +352,17 @@ public final class IndexGraveyard implements MetaData.Custom { private static final String INDEX_KEY = "index"; private static final String DELETE_DATE_IN_MILLIS_KEY = "delete_date_in_millis"; private static final String DELETE_DATE_KEY = "delete_date"; - private static final ObjectParser TOMBSTONE_PARSER; + private static final ObjectParser TOMBSTONE_PARSER; static { TOMBSTONE_PARSER = new ObjectParser<>("tombstoneEntry", Tombstone.Builder::new); - TOMBSTONE_PARSER.declareObject(Tombstone.Builder::index, Index::parseIndex, new ParseField(INDEX_KEY)); + TOMBSTONE_PARSER.declareObject(Tombstone.Builder::index, (parser, context) -> Index.fromXContent(parser), + new ParseField(INDEX_KEY)); TOMBSTONE_PARSER.declareLong(Tombstone.Builder::deleteDateInMillis, new ParseField(DELETE_DATE_IN_MILLIS_KEY)); TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY)); } - static ContextParser getParser() { - return (p, c) -> TOMBSTONE_PARSER.apply(p, c).build(); + static ContextParser getParser() { + return (parser, context) -> TOMBSTONE_PARSER.apply(parser, null).build(); } private final Index index; @@ -438,7 +437,7 @@ public final class IndexGraveyard implements MetaData.Custom { } public static Tombstone fromXContent(final XContentParser parser) throws IOException { - return TOMBSTONE_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT).build(); + return TOMBSTONE_PARSER.parse(parser, 
null).build(); } /** diff --git a/core/src/main/java/org/elasticsearch/index/Index.java b/core/src/main/java/org/elasticsearch/index/Index.java index 25b293ad387..da94ad2ec72 100644 --- a/core/src/main/java/org/elasticsearch/index/Index.java +++ b/core/src/main/java/org/elasticsearch/index/Index.java @@ -21,8 +21,6 @@ package org.elasticsearch.index; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -42,7 +40,7 @@ public class Index implements Writeable, ToXContent { public static final Index[] EMPTY_ARRAY = new Index[0]; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String INDEX_NAME_KEY = "index_name"; - private static final ObjectParser INDEX_PARSER = new ObjectParser<>("index", Builder::new); + private static final ObjectParser INDEX_PARSER = new ObjectParser<>("index", Builder::new); static { INDEX_PARSER.declareString(Builder::name, new ParseField(INDEX_NAME_KEY)); INDEX_PARSER.declareString(Builder::uuid, new ParseField(INDEX_UUID_KEY)); @@ -118,11 +116,7 @@ public class Index implements Writeable, ToXContent { } public static Index fromXContent(final XContentParser parser) throws IOException { - return INDEX_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT).build(); - } - - public static final Index parseIndex(final XContentParser parser, final ParseFieldMatcherSupplier supplier) { - return INDEX_PARSER.apply(parser, supplier).build(); + return INDEX_PARSER.parse(parser, null).build(); } /** From 13dcb8ccbef75dbc68ea0a93a45f052c0be17580 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:56:25 +0100 Subject: [PATCH 108/119] remove ParseFieldMatcher usages from IngestMetadata --- .../main/java/org/elasticsearch/ingest/IngestMetadata.java | 6 ++---- .../org/elasticsearch/ingest/PipelineConfiguration.java | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index 6c87d6c5892..012ce5ba68d 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -24,8 +24,6 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -47,7 +45,7 @@ public final class IngestMetadata implements MetaData.Custom { public static final String TYPE = "ingest"; private static final ParseField PIPELINES_FIELD = new ParseField("pipeline"); - private static final ObjectParser, ParseFieldMatcherSupplier> INGEST_METADATA_PARSER = new ObjectParser<>( + private static final ObjectParser, Void> INGEST_METADATA_PARSER = new ObjectParser<>( "ingest_metadata", ArrayList::new); static { @@ -95,7 +93,7 @@ public final class IngestMetadata implements MetaData.Custom { public static IngestMetadata fromXContent(XContentParser 
parser) throws IOException { Map pipelines = new HashMap<>(); - List configs = INGEST_METADATA_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT); + List configs = INGEST_METADATA_PARSER.parse(parser, null); for (PipelineConfiguration pipeline : configs) { pipelines.put(pipeline.getId(), pipeline); } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index c983c205f8d..543a02d3e86 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -50,8 +50,8 @@ public final class PipelineConfiguration extends AbstractDiffable getParser() { - return (p, c) -> PARSER.apply(p ,c).build(); + public static ContextParser getParser() { + return (parser, context) -> PARSER.apply(parser, null).build(); } private static class Builder { From 975fee402a831f12eb684d1535800ed2e20fcf5a Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 Jan 2017 15:57:49 +0100 Subject: [PATCH 109/119] remove ParseFieldMatcher usages from suggesters --- .../suggest/completion/context/CategoryQueryContext.java | 6 ++---- .../search/suggest/phrase/LinearInterpolation.java | 2 -- .../search/suggest/phrase/PhraseSuggestionBuilder.java | 2 -- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java index a17b7a87b43..59f59075bd3 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java @@ -21,8 +21,6 @@ package org.elasticsearch.search.suggest.completion.context; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -98,7 +96,7 @@ public final class CategoryQueryContext implements ToXContent { return result; } - private static ObjectParser CATEGORY_PARSER = new ObjectParser<>(NAME, null); + private static ObjectParser CATEGORY_PARSER = new ObjectParser<>(NAME, null); static { CATEGORY_PARSER.declareString(Builder::setCategory, new ParseField(CONTEXT_VALUE)); CATEGORY_PARSER.declareInt(Builder::setBoost, new ParseField(CONTEXT_BOOST)); @@ -110,7 +108,7 @@ public final class CategoryQueryContext implements ToXContent { XContentParser.Token token = parser.currentToken(); Builder builder = builder(); if (token == XContentParser.Token.START_OBJECT) { - CATEGORY_PARSER.parse(parser, builder, () -> ParseFieldMatcher.STRICT); + CATEGORY_PARSER.parse(parser, builder, null); } else if (token == XContentParser.Token.VALUE_STRING) { builder.setCategory(parser.text()); } else { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java index e76868b5b3f..dbd1540b325 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java 
@@ -23,7 +23,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -138,7 +137,6 @@ public final class LinearInterpolation extends SmoothingModel { double trigramLambda = 0.0; double bigramLambda = 0.0; double unigramLambda = 0.0; - ParseFieldMatcher matcher = parseContext.getParseFieldMatcher(); while ((token = parser.nextToken()) != Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 12d2af289a9..7611d30263c 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.suggest.phrase; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -491,7 +490,6 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder Date: Wed, 4 Jan 2017 15:59:31 +0100 Subject: [PATCH 110/119] remove ParseFieldMatcher usages from InternalSearchHit --- .../elasticsearch/search/internal/InternalSearchHit.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index 76de9740c8e..5784c31d99c 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -23,8 +23,6 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; @@ -726,7 +724,7 @@ public class InternalSearchHit implements SearchHit { return builder; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "nested_identity", ctorArgs -> new InternalNestedIdentity((String) ctorArgs[0], (int) ctorArgs[1], (InternalNestedIdentity) ctorArgs[2])); static { @@ -736,7 +734,7 @@ public class InternalSearchHit implements SearchHit { } public static InternalNestedIdentity fromXContent(XContentParser parser) { - return PARSER.apply(parser, () -> ParseFieldMatcher.EMPTY); + return PARSER.apply(parser, null); } @Override From 20f90178fe975b0f2526b2834e4d5ca681e38dbb Mon Sep 17 00:00:00 2001 From: Jason Veatch Date: Thu, 5 
Jan 2017 14:36:18 -0500 Subject: [PATCH 111/119] Docs: Detail on false/strict dynamic mapping setting (#22451) Reference: https://www.elastic.co/guide/en/elasticsearch/guide/master/dynamic-mapping.html --- docs/reference/mapping/dynamic/field-mapping.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc index 7bed12b5b78..2b0afa1562d 100644 --- a/docs/reference/mapping/dynamic/field-mapping.asciidoc +++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc @@ -4,7 +4,8 @@ By default, when a previously unseen field is found in a document, Elasticsearch will add the new field to the type mapping. This behaviour can be disabled, both at the document and at the <> level, by -setting the <> parameter to `false` or to `strict`. +setting the <> parameter to `false` (to ignore new fields) or to `strict` (to throw +an exception if an unknown field is encountered). Assuming `dynamic` field mapping is enabled, some simple rules are used to determine which datatype the field should have: From 27c57aeebeaa68dfb361465fe8fb66db97629776 Mon Sep 17 00:00:00 2001 From: Johannes Kanavin Date: Thu, 5 Jan 2017 19:30:05 +0100 Subject: [PATCH 112/119] Fixed id's of 'worked example' in scripted metric aggs docs (#22430) --- .../aggregations/metrics/scripted-metric-aggregation.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index f0ada57b37f..b7193b827cd 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -156,9 +156,9 @@ PUT /transactions/stock/_bulk?refresh {"type": "sale","amount": 80} {"index":{"_id":2}} {"type": "cost","amount": 10} -{"index":{"_id":2}} +{"index":{"_id":3}} {"type": "cost","amount": 30} -{"index":{"_id":2}} +{"index":{"_id":4}} {"type": "sale","amount": 130} -------------------------------------------------- // CONSOLE From 9219d667e88496282d16e00550341662f6c3e6fc Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 6 Jan 2017 06:37:49 -0500 Subject: [PATCH 113/119] Disable the Netty recycler and pooled allocator Netty plays a lot of games with recycling byte buffers in thread local caches, and using a pooled byte buffer allocator to reduce pressure on the garbage collector. The recycler in particular appears to be fraught with peril. It appears that there are circumstances where the recycler does not recycle quickly enough and can exceed its capacity leading to heap exhaustion and out of memory errors. If you spend a few minutes reading the history of the recycler on the Netty GitHub issues, it appears it has been nothing but a source of trouble, and the project itself has an open issue that proposes disabling by default and possibly even removing the recycler. The pooled byte buffer allocator has problems itself. It sizes the pool based on the number of runtime processors and can indeed grab a very large percentage of the heap (in some cases 50% or more). Additionally, the Netty project continues to struggle with leaks here. We are seeing users struggle with issues in 5.x that I think are largely driven by some of the problems here with Netty. This change proposes to disable the recycler, and to disable the pooled byte buffer allocator. 
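Concretely, that amounts to two plain Netty system properties (they appear in the jvm.options diff below). Purely as an illustration, and not part of this change, the same effect could be sketched programmatically, assuming the calls run before any Netty class is initialized:

[source,java]
--------------------------------------------------
public class DisableNettyPooling {
    public static void main(String[] args) {
        // Illustrative sketch mirroring the two new jvm.options flags. Netty reads
        // these properties in static initializers, so this must run before any
        // Netty class is loaded or the defaults have already been applied.
        System.setProperty("io.netty.recycler.maxCapacityPerThread", "0"); // disable the recycler
        System.setProperty("io.netty.allocator.type", "unpooled");         // skip the pooled allocator
    }
}
--------------------------------------------------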
I think that disabling these features will return some of the stability that these features appear to be losing us. I have done performance testing on my workstation with disabling these and I do not see a difference in performance. I propose that we make this change in master and let some nightly benchmarks run to confirm that there is not a difference in performance. If we are comfortable with the performance changes, I propose backporting this to all active branches. Relates #22452 --- distribution/src/main/resources/config/jvm.options | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options index 11ec1cd66dc..884b43a6091 100644 --- a/distribution/src/main/resources/config/jvm.options +++ b/distribution/src/main/resources/config/jvm.options @@ -65,9 +65,11 @@ # use old-style file permissions on JDK9 -Djdk.io.permissionsUseCanonicalPath=true -# flags to keep Netty from being unsafe +# flags to configure Netty -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true +-Dio.netty.recycler.maxCapacityPerThread=0 +-Dio.netty.allocator.type=unpooled # log4j 2 -Dlog4j.shutdownHookEnabled=false From 923820c6c9b57031185e65d594de9eba771ae91b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 6 Jan 2017 10:24:52 -0500 Subject: [PATCH 114/119] Document the `detailed` parameter of tasks API (#22425) Provides an example of using it and an example return description and explains that we've added descriptions for some tasks but not even close to all of them. And that we expect to change the descriptions as we learn more. Closes #22407 * Fix example Getting a single task is always detailed, no need to specify. * Rewrite like imotov wants it --- docs/reference/cluster/tasks.asciidoc | 58 +++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 4 deletions(-) diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index ce550a689bf..e087eebd9c7 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -79,8 +79,57 @@ GET _tasks?parent_task_id=parentTaskId:1 <1> <1> This won't return a 404 if the parent isn't found. -The task API can be also used to wait for completion of a particular task. The following call will -block for 10 seconds or until the task with id `oTUltX4IQMOUUVeiohTt8A:12345` is completed. +You can also use the `detailed` request parameter to get more information about +the running tasks. This is useful for telling one task from another but is more +costly to execute.
For example, fetching all searches using the `detailed` +request parameter: + +[source,js] +-------------------------------------------------- +GET _tasks?actions=*search&detailed +-------------------------------------------------- +// CONSOLE + +might look like: + +[source,js] +-------------------------------------------------- +{ + "nodes" : { + "oTUltX4IQMOUUVeiohTt8A" : { + "name" : "H5dfFeA", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1:9300", + "tasks" : { + "oTUltX4IQMOUUVeiohTt8A:464" : { + "node" : "oTUltX4IQMOUUVeiohTt8A", + "id" : 464, + "type" : "transport", + "action" : "indices:data/read/search", + "description" : "indices[test], types[test], search_type[QUERY_THEN_FETCH], source[{\"query\":...}]", + "start_time_in_millis" : 1483478610008, + "running_time_in_nanos" : 13991383, + "cancellable" : true + } + } + } + } +} +-------------------------------------------------- + +The new `description` field contains human-readable text that identifies the +particular request that the task is performing such as identifying the search +request being performed by a search task like the example above. Other kinds of +tasks have different descriptions, like <> which +has the search and the destination, or <> which just has the +number of requests and the destination indices. Many requests will only have an +empty description because more detailed information about the request is not +easily available or particularly helpful in identifying the request. + +The task API can also be used to wait for completion of a particular task. The +following call will block for 10 seconds or until the task with id +`oTUltX4IQMOUUVeiohTt8A:12345` is completed. [source,js] -------------------------------------------------- GET _tasks?actions=*reindex&wait_for_completion=true&timeout=10s -------------------------------------------------- // CONSOLE -Tasks can be also listed using _cat version of the list tasks command, which accepts the same arguments -as the standard list tasks command. +Tasks can be also listed using _cat version of the list tasks command, which +accepts the same arguments as the standard list tasks command. [source,js] -------------------------------------------------- GET _cat/tasks +GET _cat/tasks?detailed -------------------------------------------------- // CONSOLE From 79093e166316b4b891c3db07b1c2e7785f55b939 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 6 Jan 2017 16:36:43 +0100 Subject: [PATCH 115/119] Ensure shrunk indices carry over version information from their source (#22469) Today when an index is shrunk the version information is not carried over from the source to the target index. This can cause major issues, like mapping incompatibilities, for instance if an index from a previous major version is shrunk. This commit ensures that all version information from the source index is preserved when a shrunk index is created.
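The heart of the fix is small; a simplified sketch of the settings copy added to MetaDataCreateIndexService in the diff below (the helper method is illustrative, the real change inlines this in the shrink path):

[source,java]
--------------------------------------------------
// Simplified sketch of the logic added below: carry the source index's
// version settings over to the shrink target so mappings stay compatible.
static void copyVersionSettings(IndexMetaData sourceMetaData, Settings.Builder indexSettingsBuilder) {
    indexSettingsBuilder
        .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
        .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion());
    if (sourceMetaData.getMinimumCompatibleVersion() != null) {
        indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE,
                sourceMetaData.getMinimumCompatibleVersion());
    }
}
--------------------------------------------------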
Closes #22373 --- .../metadata/MetaDataCreateIndexService.java | 6 + .../admin/indices/create/CreateIndexIT.java | 195 +------------- .../admin/indices/create/ShrinkIndexIT.java | 246 ++++++++++++++++++ .../MetaDataCreateIndexServiceTests.java | 16 ++ 4 files changed, 269 insertions(+), 194 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 1cbc81cb26c..10cd0f60429 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -545,6 +545,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { throw new IllegalArgumentException("mappings are not allowed when shrinking indices" + ", all mappings are copied from the source index"); } + if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { // this method applies all necessary checks ie. if the target shards are less than the source shards // of if the source shards are divisible by the number of target shards @@ -588,9 +589,14 @@ public class MetaDataCreateIndexService extends AbstractComponent { .put("index.allocation.max_retries", 1) // now copy all similarity / analysis settings - this overrides all settings from the user unless they // wanna add extra settings + .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion()) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion()) .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate)) .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID()); + if (sourceMetaData.getMinimumCompatibleVersion() != null) { + indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, sourceMetaData.getMinimumCompatibleVersion()); + } } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 0219078fd31..1f5c92e286f 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -21,26 +21,19 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; -import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingTable; -import 
org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.RangeQueryBuilder; -import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -53,7 +46,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_WAIT_FOR_ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -63,6 +55,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @ClusterScope(scope = Scope.TEST) public class CreateIndexIT extends ESIntegTestCase { + public void testCreationDateGivenFails() { try { prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4L)).get(); @@ -288,192 +281,6 @@ public class CreateIndexIT extends ESIntegTestCase { ensureGreen("test"); } - public void testCreateShrinkIndexToN() { - int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}}; - int[] shardSplits = randomFrom(possibleShardSplits); - assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); - assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); - for (int i = 0; i < 20; i++) { - client().prepareIndex("source", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); - } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); - DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); - String mergeNode = discoveryNodes[0].getName(); - // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node - // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due - // to the require._name below. - ensureGreen(); - // relocate all shards to one node such that we can merge it. 
- client().admin().indices().prepareUpdateSettings("source") - .setSettings(Settings.builder() - .put("index.routing.allocation.require._name", mergeNode) - .put("index.blocks.write", true)).get(); - ensureGreen(); - // now merge source into a 4 shard index - assertAcked(client().admin().indices().prepareShrinkIndex("source", "first_shrink") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", shardSplits[1]).build()).get()); - ensureGreen(); - assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - - for (int i = 0; i < 20; i++) { // now update - client().prepareIndex("first_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); - } - flushAndRefresh(); - assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - - // relocate all shards to one node such that we can merge it. - client().admin().indices().prepareUpdateSettings("first_shrink") - .setSettings(Settings.builder() - .put("index.routing.allocation.require._name", mergeNode) - .put("index.blocks.write", true)).get(); - ensureGreen(); - // now merge source into a 2 shard index - assertAcked(client().admin().indices().prepareShrinkIndex("first_shrink", "second_shrink") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", shardSplits[2]).build()).get()); - ensureGreen(); - assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - // let it be allocated anywhere and bump replicas - client().admin().indices().prepareUpdateSettings("second_shrink") - .setSettings(Settings.builder() - .putNull("index.routing.allocation.include._id") - .put("index.number_of_replicas", 1)).get(); - ensureGreen(); - assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - - for (int i = 0; i < 20; i++) { // now update - client().prepareIndex("second_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); - } - flushAndRefresh(); - assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - } - - public void testCreateShrinkIndex() { - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7))).get(); - for (int i = 0; i < 20; i++) { - client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); - } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); - DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); - String mergeNode = discoveryNodes[0].getName(); - // ensure all shards are allocated otherwise the ensure green below 
might not succeed since we require the merge node - // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due - // to the require._name below. - ensureGreen(); - // relocate all shards to one node such that we can merge it. - client().admin().indices().prepareUpdateSettings("source") - .setSettings(Settings.builder() - .put("index.routing.allocation.require._name", mergeNode) - .put("index.blocks.write", true)).get(); - ensureGreen(); - // now merge source into a single shard index - - final boolean createWithReplicas = randomBoolean(); - assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") - .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get()); - ensureGreen(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - - if (createWithReplicas == false) { - // bump replicas - client().admin().indices().prepareUpdateSettings("target") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 1)).get(); - ensureGreen(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - } - - for (int i = 20; i < 40; i++) { - client().prepareIndex("target", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); - } - flushAndRefresh(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40); - assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - - } - /** - * Tests that we can manually recover from a failed allocation due to shards being moved away etc. - */ - public void testCreateShrinkIndexFails() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) - .put("number_of_shards", randomIntBetween(2, 7)) - .put("number_of_replicas", 0)).get(); - for (int i = 0; i < 20; i++) { - client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); - } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); - DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); - String spareNode = discoveryNodes[0].getName(); - String mergeNode = discoveryNodes[1].getName(); - // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node - // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due - // to the require._name below. - ensureGreen(); - // relocate all shards to one node such that we can merge it. 
- client().admin().indices().prepareUpdateSettings("source") - .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode) - .put("index.blocks.write", true)).get(); - ensureGreen(); - - // now merge source into a single shard index - client().admin().indices().prepareShrinkIndex("source", "target") - .setSettings(Settings.builder() - .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to forcefully fuck it up - .put("index.number_of_replicas", 0) - .put("index.allocation.max_retries", 1).build()).get(); - - // now we move all shards away from the merge node - client().admin().indices().prepareUpdateSettings("source") - .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode) - .put("index.blocks.write", true)).get(); - ensureGreen("source"); - - client().admin().indices().prepareUpdateSettings("target") // erase the forcefully fuckup! - .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")).get(); - // wait until it fails - assertBusy(() -> { - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); - RoutingTable routingTables = clusterStateResponse.getState().routingTable(); - assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned()); - assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED, - routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason()); - assertEquals(1, - routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations()); - }); - client().admin().indices().prepareUpdateSettings("source") // now relocate them all to the right node - .setSettings(Settings.builder() - .put("index.routing.allocation.require._name", mergeNode)).get(); - ensureGreen("source"); - - final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, - internalCluster().getMasterName()); - infoService.refresh(); - // kick off a retry and wait until it's done! - ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); - long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target") - .shard(0).getShards().get(0).getExpectedShardSize(); - // we support the expected shard size in the allocator to sum up over the source index shards - assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0); - ensureGreen(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); - } - /** * This test ensures that index creation adheres to the {@link IndexMetaData#SETTING_WAIT_FOR_ACTIVE_SHARDS}. */ diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java new file mode 100644 index 00000000000..e19f930a22d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -0,0 +1,246 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.create; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; + +import java.util.Arrays; +import java.util.Collection; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +public class ShrinkIndexIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(InternalSettingsPlugin.class); + } + + public void testCreateShrinkIndexToN() { + int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}}; + int[] shardSplits = randomFrom(possibleShardSplits); + assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); + assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); + } + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. 
+ client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true)).get(); + ensureGreen(); + // now merge source into a 4 shard index + assertAcked(client().admin().indices().prepareShrinkIndex("source", "first_shrink") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[1]).build()).get()); + ensureGreen(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("first_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("first_shrink") + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true)).get(); + ensureGreen(); + // now merge source into a 2 shard index + assertAcked(client().admin().indices().prepareShrinkIndex("first_shrink", "second_shrink") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[2]).build()).get()); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + // let it be allocated anywhere and bump replicas + client().admin().indices().prepareUpdateSettings("second_shrink") + .setSettings(Settings.builder() + .putNull("index.routing.allocation.include._id") + .put("index.number_of_replicas", 1)).get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("second_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testCreateShrinkIndex() { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomVersion(random()); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) + .put("number_of_shards", randomIntBetween(2, 7)) + .put("index.version.created", version) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); + } + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + String mergeNode 
= discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true)).get(); + ensureGreen(); + // now merge source into a single shard index + + final boolean createWithReplicas = randomBoolean(); + assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") + .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get()); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + if (createWithReplicas == false) { + // bump replicas + client().admin().indices().prepareUpdateSettings("target") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 1)).get(); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + for (int i = 20; i < 40; i++) { + client().prepareIndex("target", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } + /** + * Tests that we can manually recover from a failed allocation due to shards being moved away etc. + */ + public void testCreateShrinkIndexFails() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) + .put("number_of_shards", randomIntBetween(2, 7)) + .put("number_of_replicas", 0)).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get(); + } + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + String spareNode = discoveryNodes[0].getName(); + String mergeNode = discoveryNodes[1].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. 
+ client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true)).get(); + ensureGreen(); + + // now merge source into a single shard index + client().admin().indices().prepareShrinkIndex("source", "target") + .setWaitForActiveShards(ActiveShardCount.NONE) + .setSettings(Settings.builder() + .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to forcefully fuck it up + .put("index.number_of_replicas", 0) + .put("index.allocation.max_retries", 1).build()).get(); + client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get(); + + // now we move all shards away from the merge node + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode) + .put("index.blocks.write", true)).get(); + ensureGreen("source"); + + client().admin().indices().prepareUpdateSettings("target") // erase the forcefully fuckup! + .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")).get(); + // wait until it fails + assertBusy(() -> { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + RoutingTable routingTables = clusterStateResponse.getState().routingTable(); + assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned()); + assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED, + routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason()); + assertEquals(1, + routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations()); + }); + client().admin().indices().prepareUpdateSettings("source") // now relocate them all to the right node + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", mergeNode)).get(); + ensureGreen("source"); + + final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, + internalCluster().getMasterName()); + infoService.refresh(); + // kick off a retry and wait until it's done! 
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target") + .shard(0).getShards().get(0).getExpectedShardSize(); + // we support the expected shard size in the allocator to sum up over the source index shards + assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index d44e533fb54..50fe3c88b65 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -38,13 +38,16 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import static java.util.Collections.emptyMap; +import static java.util.Collections.min; import static org.hamcrest.Matchers.endsWith; public class MetaDataCreateIndexServiceTests extends ESTestCase { @@ -150,11 +153,20 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { public void testShrinkIndexSettings() { String indexName = randomAsciiOfLength(10); + List versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()), + VersionUtils.randomVersion(random())); + versions.sort((l, r) -> Long.compare(l.id, r.id)); + Version version = versions.get(0); + Version minCompat = versions.get(1); + Version upgraded = versions.get(2); // create one that won't fail ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0, Settings.builder() .put("index.blocks.write", true) .put("index.similarity.default.type", "BM25") + .put("index.version.created", version) + .put("index.version.upgraded", upgraded) + .put("index.version.minimum_compatible", minCompat.luceneVersion) .put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword") .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); @@ -177,6 +189,10 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { "keyword", builder.build().get("index.analysis.analyzer.my_analyzer.tokenizer")); assertEquals("node1", builder.build().get("index.routing.allocation.initial_recovery._id")); assertEquals("1", builder.build().get("index.allocation.max_retries")); + assertEquals(version, builder.build().getAsVersion("index.version.created", null)); + assertEquals(upgraded, builder.build().getAsVersion("index.version.upgraded", null)); + assertEquals(minCompat.luceneVersion.toString(), builder.build().get("index.version.minimum_compatible", null)); + } private DiscoveryNode newNode(String nodeId) { From 0035f5ab95bed4434c65fa5b3c392ded4f91dc57 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 6 Jan 2017 16:46:26 +0100 Subject: [PATCH 116/119] Fix compilation of 
benchmarks on JDK 9 The JDK 9 compiler (b151) emits the warning "No processor claimed any of these annotations" for annotations that would be runtime annotations. Maybe a regression from https://bugs.openjdk.java.net/browse/JDK-8039469. This is a quick fix so that compilation works again. --- benchmarks/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 36732215d43..fe6d7b59eb3 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -55,7 +55,7 @@ dependencies { runtime 'org.apache.commons:commons-math3:3.2' } -compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" +compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked,-processing" // enable the JMH's BenchmarkProcessor to generate the final benchmark classes // needs to be added separately otherwise Gradle will quote it and javac will fail compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) From a487b90498ae469b4437cc85d745c645ae5d4f79 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Fri, 6 Jan 2017 11:07:40 -0500 Subject: [PATCH 117/119] [TEST] fix explain API rest test that assumes there is only a single node in the cluster (incorrect assumption) --- .../test/cluster.allocation_explain/10_basic.yaml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml index 5ad5a4dce39..2c55677d88b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml @@ -19,9 +19,6 @@ cluster.state: metric: [ master_node ] - # This relies on there only being a single node in the test cluster, which - # is currently true, but if this changes in the future this test will need - # to be changed - do: cluster.allocation_explain: body: { "index": "test", "shard": 0, "primary": true } @@ -31,10 +28,10 @@ - match: { index: "test" } - match: { shard: 0 } - match: { primary: true } - - match: { can_remain_on_current_node: "yes" } - - match: { can_rebalance_cluster: "no" } - - match: { can_rebalance_to_other_node: "no" } - - match: { rebalance_explanation: "rebalancing is not allowed" } + - is_true: can_remain_on_current_node + - is_true: can_rebalance_cluster + - is_true: can_rebalance_to_other_node + - is_true: rebalance_explanation --- "cluster shard allocation explanation test with empty request": From f24ca5188a39a9d8bd48f76bb7cbae2f7897e83d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 6 Jan 2017 11:35:22 -0500 Subject: [PATCH 118/119] Fix some issues with painless's strings (#22393) 1. Escape sequences weren't working. For example `\\` is now correctly interpreted as `\` instead of `\\`. Same with `\'` being `'` and `\"` being `"` (see the sketch after this list). 2. `'` delimited strings weren't allowed to contain `"`s but it looked like they were intended to support it. Now they do. 3. Improves the error message when the script contains an invalid escape sequence inside a string to include a list of the valid escape sequences.
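To make the new behavior concrete, here is a hypothetical sketch in the style of the painless test harness, where `exec` compiles and runs a script and returns its value; the assertions are illustrative, not necessarily the exact tests added by this change:

[source,java]
--------------------------------------------------
public void testStringEscapes() {
    // Script text: return '\\';  -> a single backslash
    assertEquals("\\", exec("return '\\\\';"));
    // Script text: return '\'';  -> a single quote
    assertEquals("'", exec("return '\\'';"));
    // Script text: return "\"";  -> a double quote
    assertEquals("\"", exec("return \"\\\"\";"));
    // ' delimited strings may now contain " without escaping it
    assertEquals("\"string\"", exec("return '\"string\"';"));
}
--------------------------------------------------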
Closes #22372
---
 .../src/main/antlr/PainlessLexer.g4           |   2 +-
 .../painless/antlr/EnhancedPainlessLexer.java |  19 +++-
 .../painless/antlr/PainlessLexer.java         | 102 +++++++++---------
 .../elasticsearch/painless/antlr/Walker.java  |  22 +++-
 .../painless/BasicExpressionTests.java        |  32 ++++++
 .../painless/WhenThingsGoWrongTests.java      |  16 +++
 6 files changed, 134 insertions(+), 59 deletions(-)

diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4
index 9504a3d911a..18fdae751af 100644
--- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4
+++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4
@@ -107,7 +107,7 @@ HEX: '0' [xX] [0-9a-fA-F]+ [lL]?;
 INTEGER: ( '0' | [1-9] [0-9]* ) [lLfFdD]?;
 DECIMAL: ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? ( [eE] [+\-]? [0-9]+ )? [fFdD]?;
 
-STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) | ( '\'' ( '\\\'' | '\\\\' | ~[\\"] )*? '\'' );
+STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' );
 REGEX: '/' ( ~('/' | '\n') | '\\' ~'\n' )+ '/' [cilmsUux]* { SlashStrategy.slashIsRegex(this) }?;
 
 TRUE: 'true';

diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java
index 94a2c258974..244c2f38e62 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java
@@ -29,12 +29,13 @@ import org.antlr.v4.runtime.misc.Pair;
 import org.elasticsearch.painless.Location;
 
 /**
- * A lexer that is customized for painless. It will:
+ * A lexer that is customized for painless. It:
  * <ul>
- * <li>will override the default error behavior to fail on the first error
- * <li>store the last token in case we need to do lookbehind for semicolon insertion and regex vs division detection
- * <li>insert semicolons where they'd improve the language's readability. Rather than hack this into the parser and create a ton of
+ * <li>Overrides the default error behavior to fail on the first error
+ * <li>Stores the last token in case we need to do lookbehind for semicolon insertion and regex vs division detection
+ * <li>Inserts semicolons where they'd improve the language's readability. Rather than hack this into the parser and create a ton of
  * ambiguity we hack them here where we can use heuristics to do it quickly.
+ * <li>Enhances the error message when a string contains invalid escape sequences to include a list of valid escape sequences.
  * </ul>
*/ final class EnhancedPainlessLexer extends PainlessLexer { @@ -77,7 +78,15 @@ final class EnhancedPainlessLexer extends PainlessLexer { final String text = charStream.getText(Interval.of(startIndex, charStream.index())); Location location = new Location(sourceName, _tokenStartCharIndex); - throw location.createError(new IllegalArgumentException("unexpected character [" + getErrorDisplay(text) + "].", lnvae)); + String message = "unexpected character [" + getErrorDisplay(text) + "]."; + char firstChar = text.charAt(0); + if ((firstChar == '\'' || firstChar == '"') && text.length() - 2 > 0 && text.charAt(text.length() - 2) == '\\') { + /* Use a simple heuristic to guess if the unrecognized characters were trying to be a string but has a broken escape sequence. + * If it was add an extra message about valid string escape sequences. */ + message += " The only valid escape sequences in strings starting with [" + firstChar + "] are [\\\\] and [\\" + + firstChar + "]."; + } + throw location.createError(new IllegalArgumentException(message, lnvae)); } private static boolean insertSemicolon(Token previous, Token next) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index 529ae6f89ea..900180ec106 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -203,51 +203,51 @@ class PainlessLexer extends Lexer { "\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64"+ "h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008a"+ "F\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\u0098M\u009aN\u009cO\u009e"+ - "P\u00a0Q\u00a2R\u00a4S\u00a6T\u00a8U\u00aaV\4\2\3\24\5\2\13\f\17\17\""+ + "P\u00a0Q\u00a2R\u00a4S\u00a6T\u00a8U\u00aaV\4\2\3\25\5\2\13\f\17\17\""+ "\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b"+ - "\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2\f\f\61\61\3\2\f"+ - "\f\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u0277\2\4\3\2\2\2\2\6\3"+ - "\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2\2"+ - "\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3"+ - "\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3"+ - "\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2\64"+ - "\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3"+ - "\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2"+ - "\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2"+ - "Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3"+ - "\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2"+ - "\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2"+ - "\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2\u0088"+ - "\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090\3\2\2"+ - "\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098\3\2\2\2\2\u009a"+ - "\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2\2\2\u00a0\3\2\2\2\2\u00a2\3\2\2"+ - "\2\2\u00a4\3\2\2\2\2\u00a6\3\2\2\2\3\u00a8\3\2\2\2\3\u00aa\3\2\2\2\4\u00ad"+ - "\3\2\2\2\6\u00c8\3\2\2\2\b\u00cc\3\2\2\2\n\u00ce\3\2\2\2\f\u00d0\3\2\2"+ - "\2\16\u00d2\3\2\2\2\20\u00d4\3\2\2\2\22\u00d6\3\2\2\2\24\u00d8\3\2\2\2"+ - 
"\26\u00dc\3\2\2\2\30\u00e1\3\2\2\2\32\u00e3\3\2\2\2\34\u00e5\3\2\2\2\36"+ - "\u00e8\3\2\2\2 \u00eb\3\2\2\2\"\u00f0\3\2\2\2$\u00f6\3\2\2\2&\u00f9\3"+ - "\2\2\2(\u00fd\3\2\2\2*\u0106\3\2\2\2,\u010c\3\2\2\2.\u0113\3\2\2\2\60"+ - "\u0117\3\2\2\2\62\u011b\3\2\2\2\64\u0121\3\2\2\2\66\u0127\3\2\2\28\u012c"+ - "\3\2\2\2:\u0137\3\2\2\2<\u0139\3\2\2\2>\u013b\3\2\2\2@\u013d\3\2\2\2B"+ - "\u0140\3\2\2\2D\u0142\3\2\2\2F\u0144\3\2\2\2H\u0146\3\2\2\2J\u0149\3\2"+ - "\2\2L\u014c\3\2\2\2N\u0150\3\2\2\2P\u0152\3\2\2\2R\u0155\3\2\2\2T\u0157"+ - "\3\2\2\2V\u015a\3\2\2\2X\u015d\3\2\2\2Z\u0161\3\2\2\2\\\u0164\3\2\2\2"+ - "^\u0168\3\2\2\2`\u016a\3\2\2\2b\u016c\3\2\2\2d\u016e\3\2\2\2f\u0171\3"+ - "\2\2\2h\u0174\3\2\2\2j\u0176\3\2\2\2l\u0178\3\2\2\2n\u017b\3\2\2\2p\u017e"+ - "\3\2\2\2r\u0181\3\2\2\2t\u0184\3\2\2\2v\u0188\3\2\2\2x\u018b\3\2\2\2z"+ - "\u018e\3\2\2\2|\u0190\3\2\2\2~\u0193\3\2\2\2\u0080\u0196\3\2\2\2\u0082"+ - "\u0199\3\2\2\2\u0084\u019c\3\2\2\2\u0086\u019f\3\2\2\2\u0088\u01a2\3\2"+ - "\2\2\u008a\u01a5\3\2\2\2\u008c\u01a8\3\2\2\2\u008e\u01ac\3\2\2\2\u0090"+ - "\u01b0\3\2\2\2\u0092\u01b5\3\2\2\2\u0094\u01be\3\2\2\2\u0096\u01d0\3\2"+ - "\2\2\u0098\u01dd\3\2\2\2\u009a\u020d\3\2\2\2\u009c\u020f\3\2\2\2\u009e"+ - "\u0220\3\2\2\2\u00a0\u0225\3\2\2\2\u00a2\u022b\3\2\2\2\u00a4\u0230\3\2"+ - "\2\2\u00a6\u023b\3\2\2\2\u00a8\u024a\3\2\2\2\u00aa\u024e\3\2\2\2\u00ac"+ - "\u00ae\t\2\2\2\u00ad\u00ac\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00ad\3\2"+ - "\2\2\u00af\u00b0\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00b2\b\2\2\2\u00b2"+ - "\5\3\2\2\2\u00b3\u00b4\7\61\2\2\u00b4\u00b5\7\61\2\2\u00b5\u00b9\3\2\2"+ - "\2\u00b6\u00b8\13\2\2\2\u00b7\u00b6\3\2\2\2\u00b8\u00bb\3\2\2\2\u00b9"+ - "\u00ba\3\2\2\2\u00b9\u00b7\3\2\2\2\u00ba\u00bc\3\2\2\2\u00bb\u00b9\3\2"+ - "\2\2\u00bc\u00c9\t\3\2\2\u00bd\u00be\7\61\2\2\u00be\u00bf\7,\2\2\u00bf"+ + "\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2))^^\4\2\f\f\61"+ + "\61\3\2\f\f\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u0277\2\4\3\2"+ + "\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20"+ + "\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2"+ + "\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3"+ + "\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3"+ + "\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3"+ + "\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2"+ + "\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2"+ + "X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3"+ + "\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2"+ + "\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2"+ + "~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2"+ + "\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2"+ + "\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098"+ + "\3\2\2\2\2\u009a\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2\2\2\u00a0\3\2\2"+ + "\2\2\u00a2\3\2\2\2\2\u00a4\3\2\2\2\2\u00a6\3\2\2\2\3\u00a8\3\2\2\2\3\u00aa"+ + "\3\2\2\2\4\u00ad\3\2\2\2\6\u00c8\3\2\2\2\b\u00cc\3\2\2\2\n\u00ce\3\2\2"+ + "\2\f\u00d0\3\2\2\2\16\u00d2\3\2\2\2\20\u00d4\3\2\2\2\22\u00d6\3\2\2\2"+ + "\24\u00d8\3\2\2\2\26\u00dc\3\2\2\2\30\u00e1\3\2\2\2\32\u00e3\3\2\2\2\34"+ + "\u00e5\3\2\2\2\36\u00e8\3\2\2\2 \u00eb\3\2\2\2\"\u00f0\3\2\2\2$\u00f6"+ + "\3\2\2\2&\u00f9\3\2\2\2(\u00fd\3\2\2\2*\u0106\3\2\2\2,\u010c\3\2\2\2."+ + 
"\u0113\3\2\2\2\60\u0117\3\2\2\2\62\u011b\3\2\2\2\64\u0121\3\2\2\2\66\u0127"+ + "\3\2\2\28\u012c\3\2\2\2:\u0137\3\2\2\2<\u0139\3\2\2\2>\u013b\3\2\2\2@"+ + "\u013d\3\2\2\2B\u0140\3\2\2\2D\u0142\3\2\2\2F\u0144\3\2\2\2H\u0146\3\2"+ + "\2\2J\u0149\3\2\2\2L\u014c\3\2\2\2N\u0150\3\2\2\2P\u0152\3\2\2\2R\u0155"+ + "\3\2\2\2T\u0157\3\2\2\2V\u015a\3\2\2\2X\u015d\3\2\2\2Z\u0161\3\2\2\2\\"+ + "\u0164\3\2\2\2^\u0168\3\2\2\2`\u016a\3\2\2\2b\u016c\3\2\2\2d\u016e\3\2"+ + "\2\2f\u0171\3\2\2\2h\u0174\3\2\2\2j\u0176\3\2\2\2l\u0178\3\2\2\2n\u017b"+ + "\3\2\2\2p\u017e\3\2\2\2r\u0181\3\2\2\2t\u0184\3\2\2\2v\u0188\3\2\2\2x"+ + "\u018b\3\2\2\2z\u018e\3\2\2\2|\u0190\3\2\2\2~\u0193\3\2\2\2\u0080\u0196"+ + "\3\2\2\2\u0082\u0199\3\2\2\2\u0084\u019c\3\2\2\2\u0086\u019f\3\2\2\2\u0088"+ + "\u01a2\3\2\2\2\u008a\u01a5\3\2\2\2\u008c\u01a8\3\2\2\2\u008e\u01ac\3\2"+ + "\2\2\u0090\u01b0\3\2\2\2\u0092\u01b5\3\2\2\2\u0094\u01be\3\2\2\2\u0096"+ + "\u01d0\3\2\2\2\u0098\u01dd\3\2\2\2\u009a\u020d\3\2\2\2\u009c\u020f\3\2"+ + "\2\2\u009e\u0220\3\2\2\2\u00a0\u0225\3\2\2\2\u00a2\u022b\3\2\2\2\u00a4"+ + "\u0230\3\2\2\2\u00a6\u023b\3\2\2\2\u00a8\u024a\3\2\2\2\u00aa\u024e\3\2"+ + "\2\2\u00ac\u00ae\t\2\2\2\u00ad\u00ac\3\2\2\2\u00ae\u00af\3\2\2\2\u00af"+ + "\u00ad\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00b2\b\2"+ + "\2\2\u00b2\5\3\2\2\2\u00b3\u00b4\7\61\2\2\u00b4\u00b5\7\61\2\2\u00b5\u00b9"+ + "\3\2\2\2\u00b6\u00b8\13\2\2\2\u00b7\u00b6\3\2\2\2\u00b8\u00bb\3\2\2\2"+ + "\u00b9\u00ba\3\2\2\2\u00b9\u00b7\3\2\2\2\u00ba\u00bc\3\2\2\2\u00bb\u00b9"+ + "\3\2\2\2\u00bc\u00c9\t\3\2\2\u00bd\u00be\7\61\2\2\u00be\u00bf\7,\2\2\u00bf"+ "\u00c3\3\2\2\2\u00c0\u00c2\13\2\2\2\u00c1\u00c0\3\2\2\2\u00c2\u00c5\3"+ "\2\2\2\u00c3\u00c4\3\2\2\2\u00c3\u00c1\3\2\2\2\u00c4\u00c6\3\2\2\2\u00c5"+ "\u00c3\3\2\2\2\u00c6\u00c7\7,\2\2\u00c7\u00c9\7\61\2\2\u00c8\u00b3\3\2"+ @@ -340,14 +340,14 @@ class PainlessLexer extends Lexer { "\u01fa\3\2\2\2\u01fc\u01ff\3\2\2\2\u01fd\u01fe\3\2\2\2\u01fd\u01fb\3\2"+ "\2\2\u01fe\u0200\3\2\2\2\u01ff\u01fd\3\2\2\2\u0200\u020e\7$\2\2\u0201"+ "\u0209\7)\2\2\u0202\u0203\7^\2\2\u0203\u0208\7)\2\2\u0204\u0205\7^\2\2"+ - "\u0205\u0208\7^\2\2\u0206\u0208\n\16\2\2\u0207\u0202\3\2\2\2\u0207\u0204"+ + "\u0205\u0208\7^\2\2\u0206\u0208\n\17\2\2\u0207\u0202\3\2\2\2\u0207\u0204"+ "\3\2\2\2\u0207\u0206\3\2\2\2\u0208\u020b\3\2\2\2\u0209\u020a\3\2\2\2\u0209"+ "\u0207\3\2\2\2\u020a\u020c\3\2\2\2\u020b\u0209\3\2\2\2\u020c\u020e\7)"+ "\2\2\u020d\u01f5\3\2\2\2\u020d\u0201\3\2\2\2\u020e\u009b\3\2\2\2\u020f"+ - "\u0213\7\61\2\2\u0210\u0214\n\17\2\2\u0211\u0212\7^\2\2\u0212\u0214\n"+ - "\20\2\2\u0213\u0210\3\2\2\2\u0213\u0211\3\2\2\2\u0214\u0215\3\2\2\2\u0215"+ + "\u0213\7\61\2\2\u0210\u0214\n\20\2\2\u0211\u0212\7^\2\2\u0212\u0214\n"+ + "\21\2\2\u0213\u0210\3\2\2\2\u0213\u0211\3\2\2\2\u0214\u0215\3\2\2\2\u0215"+ "\u0213\3\2\2\2\u0215\u0216\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u021b\7\61"+ - "\2\2\u0218\u021a\t\21\2\2\u0219\u0218\3\2\2\2\u021a\u021d\3\2\2\2\u021b"+ + "\2\2\u0218\u021a\t\22\2\2\u0219\u0218\3\2\2\2\u021a\u021d\3\2\2\2\u021b"+ "\u0219\3\2\2\2\u021b\u021c\3\2\2\2\u021c\u021e\3\2\2\2\u021d\u021b\3\2"+ "\2\2\u021e\u021f\6N\3\2\u021f\u009d\3\2\2\2\u0220\u0221\7v\2\2\u0221\u0222"+ "\7t\2\2\u0222\u0223\7w\2\2\u0223\u0224\7g\2\2\u0224\u009f\3\2\2\2\u0225"+ @@ -357,14 +357,14 @@ class PainlessLexer extends Lexer { "\u0236\5\u00a6S\2\u0231\u0232\5\24\n\2\u0232\u0233\5\u00a6S\2\u0233\u0235"+ "\3\2\2\2\u0234\u0231\3\2\2\2\u0235\u0238\3\2\2\2\u0236\u0234\3\2\2\2\u0236"+ "\u0237\3\2\2\2\u0237\u0239\3\2\2\2\u0238\u0236\3\2\2\2\u0239\u023a\6R"+ - 
"\4\2\u023a\u00a5\3\2\2\2\u023b\u023f\t\22\2\2\u023c\u023e\t\23\2\2\u023d"+ + "\4\2\u023a\u00a5\3\2\2\2\u023b\u023f\t\23\2\2\u023c\u023e\t\24\2\2\u023d"+ "\u023c\3\2\2\2\u023e\u0241\3\2\2\2\u023f\u023d\3\2\2\2\u023f\u0240\3\2"+ "\2\2\u0240\u00a7\3\2\2\2\u0241\u023f\3\2\2\2\u0242\u024b\7\62\2\2\u0243"+ "\u0247\t\b\2\2\u0244\u0246\t\t\2\2\u0245\u0244\3\2\2\2\u0246\u0249\3\2"+ "\2\2\u0247\u0245\3\2\2\2\u0247\u0248\3\2\2\2\u0248\u024b\3\2\2\2\u0249"+ "\u0247\3\2\2\2\u024a\u0242\3\2\2\2\u024a\u0243\3\2\2\2\u024b\u024c\3\2"+ - "\2\2\u024c\u024d\bT\4\2\u024d\u00a9\3\2\2\2\u024e\u0252\t\22\2\2\u024f"+ - "\u0251\t\23\2\2\u0250\u024f\3\2\2\2\u0251\u0254\3\2\2\2\u0252\u0250\3"+ + "\2\2\u024c\u024d\bT\4\2\u024d\u00a9\3\2\2\2\u024e\u0252\t\23\2\2\u024f"+ + "\u0251\t\24\2\2\u0250\u024f\3\2\2\2\u0251\u0254\3\2\2\2\u0252\u0250\3"+ "\2\2\2\u0252\u0253\3\2\2\2\u0253\u0255\3\2\2\2\u0254\u0252\3\2\2\2\u0255"+ "\u0256\bU\4\2\u0256\u00ab\3\2\2\2$\2\3\u00af\u00b9\u00c3\u00c8\u01b9\u01bc"+ "\u01c3\u01c6\u01cd\u01d0\u01d3\u01da\u01dd\u01e3\u01e5\u01e9\u01ee\u01f0"+ diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 84d58afa62d..9de833e5419 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -799,9 +799,27 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public ANode visitString(StringContext ctx) { - String string = ctx.STRING().getText().substring(1, ctx.STRING().getText().length() - 1); + StringBuilder string = new StringBuilder(ctx.STRING().getText()); - return new EString(location(ctx), string); + // Strip the leading and trailing quotes and replace the escape sequences with their literal equivalents + int src = 1; + int dest = 0; + int end = string.length() - 1; + assert string.charAt(0) == '"' || string.charAt(0) == '\'' : "expected string to start with a quote but was [" + string + "]"; + assert string.charAt(end) == '"' || string.charAt(end) == '\'' : "expected string to end with a quote was [" + string + "]"; + while (src < end) { + char current = string.charAt(src); + if (current == '\\') { + src++; + current = string.charAt(src); + } + string.setCharAt(dest, current); + src++; + dest++; + } + string.setLength(dest); + + return new EString(location(ctx), string.toString()); } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index 7c0694d67ba..ef2ddad5452 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -42,6 +42,7 @@ public class BasicExpressionTests extends ScriptTestCase { assertEquals((byte)255, exec("return (byte)255")); assertEquals((short)5, exec("return (short)5")); assertEquals("string", exec("return \"string\"")); + assertEquals("string", exec("return 'string'")); assertEquals(true, exec("return true")); assertEquals(false, exec("return false")); assertNull(exec("return null")); @@ -55,6 +56,37 @@ public class BasicExpressionTests extends ScriptTestCase { assertEquals('蚠', exec("return (char)100000;")); } + public void testStringEscapes() { + // The readability of this test suffers from having to escape `\` and 
`"` in java strings. Please be careful. Sorry! + // `\\` is a `\` + assertEquals("\\string", exec("\"\\\\string\"")); + assertEquals("\\string", exec("'\\\\string'")); + // `\"` is a `"` if surrounded by `"`s + assertEquals("\"string", exec("\"\\\"string\"")); + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> exec("'\\\"string'", false)); + assertEquals("unexpected character ['\\\"]. The only valid escape sequences in strings starting with ['] are [\\\\] and [\\'].", + e.getMessage()); + // `\'` is a `'` if surrounded by `'`s + e = expectScriptThrows(IllegalArgumentException.class, () -> exec("\"\\'string\"", false)); + assertEquals("unexpected character [\"\\']. The only valid escape sequences in strings starting with [\"] are [\\\\] and [\\\"].", + e.getMessage()); + assertEquals("'string", exec("'\\'string'")); + // We don't break native escapes like new line + assertEquals("\nstring", exec("\"\nstring\"")); + assertEquals("\nstring", exec("'\nstring'")); + + // And we're ok with strings with multiple escape sequences + assertEquals("\\str\"in\\g", exec("\"\\\\str\\\"in\\\\g\"")); + assertEquals("st\\r'i\\ng", exec("'st\\\\r\\'i\\\\ng'")); + } + + public void testStringTermination() { + // `'` inside of a string delimited with `"` should be ok + assertEquals("test'", exec("\"test'\"")); + // `"` inside of a string delimited with `'` should be ok + assertEquals("test\"", exec("'test\"'")); + } + /** declaring variables for primitive types */ public void testDeclareVariable() { assertEquals(5, exec("int i = 5; return i;")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index f23b13341cc..4051d8457fa 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -267,4 +267,20 @@ public class WhenThingsGoWrongTests extends ScriptTestCase { assertEquals("invalid sequence of tokens near ['.'].", e.getMessage()); } + public void testBadStringEscape() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> exec("'\\a'", false)); + assertEquals("unexpected character ['\\a]. The only valid escape sequences in strings starting with ['] are [\\\\] and [\\'].", + e.getMessage()); + e = expectScriptThrows(IllegalArgumentException.class, () -> exec("\"\\a\"", false)); + assertEquals("unexpected character [\"\\a]. The only valid escape sequences in strings starting with [\"] are [\\\\] and [\\\"].", + e.getMessage()); + } + + public void testRegularUnexpectedCharacter() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> exec("'", false)); + assertEquals("unexpected character ['].", e.getMessage()); + e = expectScriptThrows(IllegalArgumentException.class, () -> exec("'cat", false)); + assertEquals("unexpected character ['cat].", e.getMessage()); + } + } From b9c2c2f6f05ca03cecb9e1bc7971e40f3f10fa37 Mon Sep 17 00:00:00 2001 From: Tim B Date: Fri, 6 Jan 2017 11:10:53 -0600 Subject: [PATCH 119/119] Move IfConfig.logIfNecessary call into bootstrap (#22455) This is related to #22116. A logIfNecessary() call makes a call to NetworkInterface.getInterfaceAddresses() requiring SocketPermission connect privileges. By moving this to bootstrap the logging call can be made before installing the SecurityManager. 
---
 .../main/java/org/elasticsearch/bootstrap/Bootstrap.java | 4 ++++
 .../java/org/elasticsearch/common/network/IfConfig.java  | 6 +++---
 .../org/elasticsearch/common/network/NetworkService.java | 1 -
 .../org/elasticsearch/bootstrap/BootstrapForTesting.java | 4 ++++
 4 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
index 2ee2d69baf3..fc39f8f3635 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
@@ -40,6 +40,7 @@ import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.IfConfig;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.env.Environment;
@@ -207,6 +208,9 @@ final class Bootstrap {
             throw new BootstrapException(e);
         }
 
+        // Log ifconfig output before SecurityManager is installed
+        IfConfig.logIfNecessary();
+
         // install SM after natives, shutdown hooks, etc.
         try {
             Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));

diff --git a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
index 7fd4cc6d2f3..8ad85150299 100644
--- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
+++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
@@ -34,17 +34,17 @@ import java.util.Locale;
 
 /**
  * Simple class to log {@code ifconfig}-style output at DEBUG logging.
 */
-final class IfConfig {
+public final class IfConfig {
 
     private static final Logger logger = Loggers.getLogger(IfConfig.class);
     private static final String INDENT = " ";
 
     /** log interface configuration at debug level, if its enabled */
-    static void logIfNecessary() {
+    public static void logIfNecessary() {
         if (logger.isDebugEnabled()) {
             try {
                 doLogging();
-            } catch (IOException | SecurityException e) {
+            } catch (IOException e) {
                 logger.warn("unable to gather network information", e);
             }
         }

diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java
index b72acf8064c..a469de03208 100644
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java
+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java
@@ -90,7 +90,6 @@ public class NetworkService extends AbstractComponent {
 
     public NetworkService(Settings settings, List<CustomNameResolver> customNameResolvers) {
         super(settings);
-        IfConfig.logIfNecessary();
         this.customNameResolvers = customNameResolvers;
     }
 

diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
index ed8725fa008..d0558116448 100644
--- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
+++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -25,6 +25,7 @@ import org.elasticsearch.SecureSM;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.network.IfConfig;
 import org.elasticsearch.plugins.PluginInfo;
 import org.junit.Assert;
 
@@ -89,6 +90,9 @@ public class BootstrapForTesting {
             throw new RuntimeException("found jar hell in test classpath", e);
         }
 
+        // Log ifconfig output before SecurityManager is installed
+        IfConfig.logIfNecessary();
+
         // install security manager if requested
         if (systemPropertyAsBoolean("tests.security.manager", true)) {
             try {