From c08daf25892c12f7219a097e8b9290dff280c047 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Fri, 27 Apr 2018 15:26:46 +0200
Subject: [PATCH 01/68] Build global ordinals terms bucket from matching ordinals (#30166)

The global ordinals terms aggregator has an option to remap global ordinals to
dense ordinals that match the request. This mode is automatically picked when
the terms aggregator is a child of another bucket aggregator or when it needs
to defer buckets to an aggregation that is used in the ordering of the terms.
However, when building the final buckets, this aggregator loops over all
possible global ordinals rather than using the hash map that was built to
remap the ordinals. For fields with high cardinality this is highly
inefficient and can lead to slow responses even when the number of terms that
match the query is low.

This change fixes this performance issue by using the hash table of matching
ordinals to prune the final buckets for the terms and significant_terms
aggregations.

I ran a simple benchmark with 1M documents containing 0 to 10 keywords
randomly selected among 1M unique terms. This field is used to perform a
multi-level terms aggregation, using Rally to collect the response times. The
aggregation below is an example of a two-level terms aggregation that was used
to perform the benchmark:

```
"aggregations":{
  "1":{
    "terms":{
      "field":"keyword"
    },
    "aggregations":{
      "2":{
        "terms":{
          "field":"keyword"
        }
      }
    }
  }
}
```

| Levels of aggregation | 50th percentile (master) | 50th percentile (patch) |
| --- | --- | --- |
| 2 | 640.41ms | 577.499ms |
| 3 | 2239.66ms | 600.154ms |
| 4 | 14141.2ms | 703.512ms |

Closes #30117
---
 ...balOrdinalsSignificantTermsAggregator.java | 21 ++++++++++-----
 .../GlobalOrdinalsStringTermsAggregator.java  | 27 ++++++++++++-------
 2 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
index 66b8f8d5b15..25f83caa3eb 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
@@ -20,10 +20,8 @@ package org.elasticsearch.search.aggregations.bucket.significant;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -103,11 +101,22 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
         BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size);
         SignificantStringTerms.Bucket spare = null;
-        for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) {
-            if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) {
+        final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0;
+        final long maxId = needsFullScan ?
valueCount : bucketOrds.size(); + for (long ord = 0; ord < maxId; ord++) { + final long globalOrd; + final long bucketOrd; + if (needsFullScan) { + bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord); + globalOrd = ord; + } else { + assert bucketOrds != null; + bucketOrd = ord; + globalOrd = bucketOrds.get(ord); + } + if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) { continue; } - final long bucketOrd = getBucketOrd(globalTermOrd); final int bucketDocCount = bucketOrd < 0 ? 0 : bucketDocCount(bucketOrd); if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) { continue; @@ -120,7 +129,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format); } spare.bucketOrd = bucketOrd; - copy(lookupGlobalOrd.apply(globalTermOrd), spare.termBytes); + copy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); spare.subsetDf = bucketDocCount; spare.subsetSize = subsetSize; spare.supersetDf = termsAggFactory.getBackgroundFrequency(spare.termBytes); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 6ad14b8d0f9..03eb00337e9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -71,7 +71,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr protected final long valueCount; protected final GlobalOrdLookupFunction lookupGlobalOrd; - private final LongHash bucketOrds; + protected final LongHash bucketOrds; public interface GlobalOrdLookupFunction { BytesRef apply(long ord) throws IOException; @@ -107,10 +107,6 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr return bucketOrds != null; } - protected final long getBucketOrd(long globalOrd) { - return bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd); - } - private void collectGlobalOrd(int doc, long globalOrd, LeafBucketCollector sub) throws IOException { if (bucketOrds == null) { collectExistingBucket(sub, doc, globalOrd); @@ -188,17 +184,28 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr long otherDocCount = 0; BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(this)); OrdBucket spare = new OrdBucket(-1, 0, null, showTermDocCountError, 0); - for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) { - if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) { + final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0; + final long maxId = needsFullScan ? valueCount : bucketOrds.size(); + for (long ord = 0; ord < maxId; ord++) { + final long globalOrd; + final long bucketOrd; + if (needsFullScan) { + bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord); + globalOrd = ord; + } else { + assert bucketOrds != null; + bucketOrd = ord; + globalOrd = bucketOrds.get(ord); + } + if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) { continue; } - final long bucketOrd = getBucketOrd(globalTermOrd); final int bucketDocCount = bucketOrd < 0 ? 
0 : bucketDocCount(bucketOrd);
         if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) {
             continue;
         }
         otherDocCount += bucketDocCount;
-        spare.globalOrd = globalTermOrd;
+        spare.globalOrd = globalOrd;
         spare.bucketOrd = bucketOrd;
         spare.docCount = bucketDocCount;
         if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
@@ -378,7 +385,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
             }
             final long ord = i - 1; // remember we do +1 when counting
             final long globalOrd = mapping.applyAsLong(ord);
-            long bucketOrd = getBucketOrd(globalOrd);
+            long bucketOrd = bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd);
             incrementBucketDocCount(bucketOrd, inc);
         }
     }

From a7e69b07a17821b45b56bb06b05c6635872bcf35 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Fri, 27 Apr 2018 10:07:38 -0400
Subject: [PATCH 02/68] Docs: Document `failures` on reindex and friends

We already had *some* documentation of the batch nature of `reindex` and
friends, but it wasn't super obvious how it interacted with the `failures`
element in the response. This adds some more documentation for the `failures`
element.
---
 docs/reference/docs/delete-by-query.asciidoc |  9 ++++++---
 docs/reference/docs/reindex.asciidoc         | 20 +++++++++++---------
 docs/reference/docs/update-by-query.asciidoc | 10 +++++++---
 3 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc
index be015a811e9..f9919483e5a 100644
--- a/docs/reference/docs/delete-by-query.asciidoc
+++ b/docs/reference/docs/delete-by-query.asciidoc
@@ -284,9 +284,12 @@ executed again in order to conform to `requests_per_second`.
 `failures`::
-Array of all indexing failures. If this is non-empty then the request aborted
-because of those failures. See `conflicts` for how to prevent version conflicts
-from aborting the operation.
+Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures.
+Delete-by-query is implemented using batches and any failure causes the entire
+process to abort but all failures in the current batch are collected into the
+array. You can use the `conflicts` option to prevent reindex from aborting on
+version conflicts.

 [float]

diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc
index 5f34371ab84..e8283abfc2e 100644
--- a/docs/reference/docs/reindex.asciidoc
+++ b/docs/reference/docs/reindex.asciidoc
@@ -161,12 +161,12 @@ POST _reindex
 `index` and `type` in `source` can both be lists, allowing you to copy from
 lots of sources in one request. This will copy documents from the `_doc` and
-`post` types in the `twitter` and `blog` index. The copied documents would include the
-`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
+`post` types in the `twitter` and `blog` index. The copied documents would include the
+`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
 specific parameters, you can use `query`.

-The Reindex API makes no effort to handle ID collisions. For such issues, the target index
-will remain valid, but it's not easy to predict which document will survive because
+The Reindex API makes no effort to handle ID collisions. For such issues, the target index
+will remain valid, but it's not easy to predict which document will survive because
 the iteration order isn't well defined.
[source,js]
@@ -666,9 +666,11 @@ executed again in order to conform to `requests_per_second`.
 `failures`::
-Array of all indexing failures. If this is non-empty then the request aborted
-because of those failures. See `conflicts` for how to prevent version conflicts
-from aborting the operation.
+Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures. Reindex
+is implemented using batches and any failure causes the entire process to abort
+but all failures in the current batch are collected into the array. You can use
+the `conflicts` option to prevent reindex from aborting on version conflicts.

 [float]
 [[docs-reindex-task-api]]
@@ -1004,7 +1006,7 @@ number for most indices. If slicing manually or otherwise tuning
 automatic slicing, use these guidelines.

 Query performance is most efficient when the number of `slices` is equal to the
-number of shards in the index. If that number is large (e.g. 500),
+number of shards in the index. If that number is large (e.g. 500),
 choose a lower number as too many `slices` will hurt performance. Setting
 `slices` higher than the number of shards generally does not improve efficiency
 and adds overhead.
@@ -1018,7 +1020,7 @@ documents being reindexed and cluster resources.

 [float]
 === Reindex daily indices

-You can use `_reindex` in combination with <>
+You can use `_reindex` in combination with <>
 to reindex daily indices to apply a new template to the existing documents.

 Assuming you have indices consisting of documents as follows:

diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc
index 482f3d62f5d..1d81e4a44ff 100644
--- a/docs/reference/docs/update-by-query.asciidoc
+++ b/docs/reference/docs/update-by-query.asciidoc
@@ -338,9 +338,13 @@ executed again in order to conform to `requests_per_second`.
 `failures`::
-Array of all indexing failures. If this is non-empty then the request aborted
-because of those failures. See `conflicts` for how to prevent version conflicts
-from aborting the operation.
+Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures.
+Update-by-query is implemented using batches and any failure causes the entire
+process to abort but all failures in the current batch are collected into the
+array. You can use the `conflicts` option to prevent reindex from aborting on
+version conflicts.
+

 [float]

From 63148dd9ba35edc70b629031d2872e7d06046e21 Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Fri, 27 Apr 2018 16:29:59 +0200
Subject: [PATCH 03/68] Fail snapshot operations early on repository corruption (#30140)

A NullPointerException is thrown when trying to create or delete a snapshot in
a repository that has been written to by an older version of Elasticsearch
after writing to it with a newer Elasticsearch version. This is because the
way snapshots are formatted in the repository snapshots index file changed in
#24477.

This commit changes the parsing of the repository index file so that it now
detects a corrupted index file and fails the snapshot operation early.
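In short, the parsing now fails fast while resolving the snapshot ids that each
index entry references. A condensed sketch of the check added to
`RepositoryData.snapshotsFromXContent` (all names are taken from the diff
below; `snapshots`, `uuid`, `indexId` and `snapshotIds` are locals of the
surrounding parsing loop):

```java
SnapshotId snapshotId = snapshots.get(uuid);
if (snapshotId != null) {
    snapshotIds.add(snapshotId);
} else {
    // The index references a snapshot that does not exist in the list of
    // snapshots, e.g. because clusters on different versions created or
    // deleted snapshots in the same repository: fail early instead of
    // letting the null propagate into a NullPointerException later.
    throw new ElasticsearchParseException("Detected a corrupted repository, index " + indexId
        + " references an unknown snapshot uuid [" + uuid + "]");
}
```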
closes #29052 --- docs/CHANGELOG.asciidoc | 3 + docs/reference/modules/snapshots.asciidoc | 12 +-- .../TransportSnapshotsStatusAction.java | 6 +- .../repositories/RepositoryData.java | 39 +++++---- .../snapshots/SnapshotsService.java | 3 +- .../repositories/RepositoryDataTests.java | 87 ++++++++++++++++++- 6 files changed, 121 insertions(+), 29 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index fde295b56ba..6fb989de7c1 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -24,6 +24,9 @@ === Bug Fixes +Fail snapshot operations early when creating or deleting a snapshot on a repository that has been +written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) + === Regressions === Known Issues diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index ea3f99debb9..693d537d732 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -44,12 +44,12 @@ If you register same snapshot repository with multiple clusters, only one cluster should have write access to the repository. All other clusters connected to that repository should set the repository to `readonly` mode. -NOTE: The snapshot format can change across major versions, so if you have -clusters on different major versions trying to write the same repository, -new snapshots written by one version will not be visible to the other. While -setting the repository to `readonly` on all but one of the clusters should work -with multiple clusters differing by one major version, it is not a supported -configuration. +IMPORTANT: The snapshot format can change across major versions, so if you have +clusters on different versions trying to write the same repository, snapshots +written by one version may not be visible to the other and the repository could +be corrupted. While setting the repository to `readonly` on all but one of the +clusters should work with multiple clusters differing by one major version, it +is not a supported configuration. 
[source,js] ----------------------------------- diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index dc13c8dab51..949918f88a1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -230,9 +230,9 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatusBuilder = new ArrayList<>(); if (snapshotInfo.state().completed()) { - Map shardStatues = - snapshotsService.snapshotShards(request.repository(), snapshotInfo); - for (Map.Entry shardStatus : shardStatues.entrySet()) { + Map shardStatuses = + snapshotsService.snapshotShards(repositoryName, repositoryData, snapshotInfo); + for (Map.Entry shardStatus : shardStatuses.entrySet()) { IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 102bc5a5f05..7a8d8327d5e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -230,13 +230,6 @@ public final class RepositoryData { return snapshotIds; } - /** - * Initializes the indices in the repository metadata; returns a new instance. - */ - public RepositoryData initIndices(final Map> indexSnapshots) { - return new RepositoryData(genId, snapshotIds, snapshotStates, indexSnapshots, incompatibleSnapshotIds); - } - @Override public boolean equals(Object obj) { if (this == obj) { @@ -352,9 +345,10 @@ public final class RepositoryData { * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata. 
*/ public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException { - Map snapshots = new HashMap<>(); - Map snapshotStates = new HashMap<>(); - Map> indexSnapshots = new HashMap<>(); + final Map snapshots = new HashMap<>(); + final Map snapshotStates = new HashMap<>(); + final Map> indexSnapshots = new HashMap<>(); + if (parser.nextToken() == XContentParser.Token.START_OBJECT) { while (parser.nextToken() == XContentParser.Token.FIELD_NAME) { String field = parser.currentName(); @@ -397,17 +391,18 @@ public final class RepositoryData { throw new ElasticsearchParseException("start object expected [indices]"); } while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String indexName = parser.currentName(); - String indexId = null; - Set snapshotIds = new LinkedHashSet<>(); + final String indexName = parser.currentName(); + final Set snapshotIds = new LinkedHashSet<>(); + + IndexId indexId = null; if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("start object expected index[" + indexName + "]"); } while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String indexMetaFieldName = parser.currentName(); + final String indexMetaFieldName = parser.currentName(); parser.nextToken(); if (INDEX_ID.equals(indexMetaFieldName)) { - indexId = parser.text(); + indexId = new IndexId(indexName, parser.text()); } else if (SNAPSHOTS.equals(indexMetaFieldName)) { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { throw new ElasticsearchParseException("start array expected [snapshots]"); @@ -428,12 +423,22 @@ public final class RepositoryData { // since we already have the name/uuid combo in the snapshots array uuid = parser.text(); } - snapshotIds.add(snapshots.get(uuid)); + + SnapshotId snapshotId = snapshots.get(uuid); + if (snapshotId != null) { + snapshotIds.add(snapshotId); + } else { + // A snapshotted index references a snapshot which does not exist in + // the list of snapshots. This can happen when multiple clusters in + // different versions create or delete snapshot in the same repository. 
+ throw new ElasticsearchParseException("Detected a corrupted repository, index " + indexId + + " references an unknown snapshot uuid [" + uuid + "]"); + } } } } assert indexId != null; - indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds); + indexSnapshots.put(indexId, snapshotIds); } } else { throw new ElasticsearchParseException("unknown field name [" + field + "]"); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index daf5c78b78c..5665680fd9c 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -592,10 +592,9 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus * @return map of shard id to snapshot status */ public Map snapshotShards(final String repositoryName, + final RepositoryData repositoryData, final SnapshotInfo snapshotInfo) throws IOException { final Repository repository = repositoriesService.repository(repositoryName); - final RepositoryData repositoryData = repository.getRepositoryData(); - final Map shardStatus = new HashMap<>(); for (String index : snapshotInfo.indices()) { IndexId indexId = repositoryData.resolveIndexId(index); diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 8c1e242b326..db8aa615c14 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -19,11 +19,14 @@ package org.elasticsearch.repositories; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotState; @@ -39,7 +42,11 @@ import java.util.List; import java.util.Map; import java.util.Set; +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; /** @@ -101,15 +108,18 @@ public class RepositoryDataTests extends ESTestCase { public void testInitIndices() { final int numSnapshots = randomIntBetween(1, 30); final Map snapshotIds = new HashMap<>(numSnapshots); + final Map snapshotStates = new HashMap<>(numSnapshots); for (int i = 0; i < numSnapshots; i++) { final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); snapshotIds.put(snapshotId.getUUID(), snapshotId); + snapshotStates.put(snapshotId.getUUID(), randomFrom(SnapshotState.values())); } RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList()); // test that initializing indices works Map> indices = randomIndices(snapshotIds); - RepositoryData newRepoData = 
repositoryData.initIndices(indices); + RepositoryData newRepoData = new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, indices, + new ArrayList<>(repositoryData.getIncompatibleSnapshotIds())); List expected = new ArrayList<>(repositoryData.getSnapshotIds()); Collections.sort(expected); List actual = new ArrayList<>(newRepoData.getSnapshotIds()); @@ -153,6 +163,81 @@ public class RepositoryDataTests extends ESTestCase { assertNull(repositoryData.getSnapshotState(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()))); } + public void testIndexThatReferencesAnUnknownSnapshot() throws IOException { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + final RepositoryData repositoryData = generateRandomRepoData(); + + XContentBuilder builder = XContentBuilder.builder(xContent); + repositoryData.snapshotsToXContent(builder, ToXContent.EMPTY_PARAMS); + RepositoryData parsedRepositoryData = RepositoryData.snapshotsFromXContent(createParser(builder), repositoryData.getGenId()); + assertEquals(repositoryData, parsedRepositoryData); + + Map snapshotIds = new HashMap<>(); + Map snapshotStates = new HashMap<>(); + for (SnapshotId snapshotId : parsedRepositoryData.getSnapshotIds()) { + snapshotIds.put(snapshotId.getUUID(), snapshotId); + snapshotStates.put(snapshotId.getUUID(), parsedRepositoryData.getSnapshotState(snapshotId)); + } + + final IndexId corruptedIndexId = randomFrom(parsedRepositoryData.getIndices().values()); + + Map> indexSnapshots = new HashMap<>(); + for (Map.Entry snapshottedIndex : parsedRepositoryData.getIndices().entrySet()) { + IndexId indexId = snapshottedIndex.getValue(); + Set snapshotsIds = new LinkedHashSet<>(parsedRepositoryData.getSnapshots(indexId)); + if (corruptedIndexId.equals(indexId)) { + snapshotsIds.add(new SnapshotId("_uuid", "_does_not_exist")); + } + indexSnapshots.put(indexId, snapshotsIds); + } + assertNotNull(corruptedIndexId); + + RepositoryData corruptedRepositoryData = new RepositoryData(parsedRepositoryData.getGenId(), snapshotIds, snapshotStates, + indexSnapshots, new ArrayList<>(parsedRepositoryData.getIncompatibleSnapshotIds())); + + final XContentBuilder corruptedBuilder = XContentBuilder.builder(xContent); + corruptedRepositoryData.snapshotsToXContent(corruptedBuilder, ToXContent.EMPTY_PARAMS); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> + RepositoryData.snapshotsFromXContent(createParser(corruptedBuilder), corruptedRepositoryData.getGenId())); + assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index " + corruptedIndexId + " references an unknown " + + "snapshot uuid [_does_not_exist]")); + } + + public void testIndexThatReferenceANullSnapshot() throws IOException { + final XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON).xContent()); + builder.startObject(); + { + builder.startArray("snapshots"); + builder.value(new SnapshotId("_name", "_uuid")); + builder.endArray(); + + builder.startObject("indices"); + { + builder.startObject("docs"); + { + builder.field("id", "_id"); + builder.startArray("snapshots"); + { + builder.startObject(); + if (randomBoolean()) { + builder.field("name", "_name"); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> + RepositoryData.snapshotsFromXContent(createParser(builder), 
randomNonNegativeLong()));
+        assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index [docs/_id] references an unknown snapshot uuid [null]"));
+    }
+
     public static RepositoryData generateRandomRepoData() {
         final int numIndices = randomIntBetween(1, 30);
         final List indices = new ArrayList<>(numIndices);

From 7ae3b3b155b3729b5a2475315f9a742ff1c56756 Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Fri, 27 Apr 2018 16:49:06 +0200
Subject: [PATCH 04/68] Move repository-s3 fixture tests to QA test project (#29372)

This commit moves the repository-s3 fixture test added in #29296 into a new
`repository-s3/qa/amazon-s3` project. This new project allows the REST
integration tests to be executed using the real S3 service when all the
required environment variables are provided. When no env var is provided, the
tests are executed using the fixture added in #29296.

The REST tests located in the `repository-s3` plugin project now only verify
that the plugin is correctly loaded.

The REST tests have been adapted to allow a bucket name and a base path to be
specified as env vars. This way it is possible to run the tests with different
base paths (could be anything, like a CI job name or a branch name) without
multiplying buckets.

Related to #29349
---
 plugins/repository-s3/build.gradle            |  22 +--
 .../repository-s3/qa/amazon-s3/build.gradle   |  83 ++++++++
 .../repositories/s3/AmazonS3Fixture.java      |   0
 ...azonS3RepositoryClientYamlTestSuiteIT.java |  37 ++++
 .../repositories/s3/AmazonS3TestServer.java   |   0
 .../test/repository_s3/10_repository.yml      | 183 ++++++++++++++++++
 plugins/repository-s3/qa/build.gradle         |   0
 .../repositories/s3/S3Repository.java         |   2 +-
 .../test/repository_s3/10_basic.yml           | 180 -----------------
 qa/smoke-test-plugins/build.gradle            |   4 +-
 qa/vagrant/build.gradle                       |   2 +-
 x-pack/qa/smoke-test-plugins-ssl/build.gradle |   6 +-
 x-pack/qa/smoke-test-plugins/build.gradle     |   6 +-
 13 files changed, 318 insertions(+), 207 deletions(-)
 create mode 100644 plugins/repository-s3/qa/amazon-s3/build.gradle
 rename plugins/repository-s3/{ => qa/amazon-s3}/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java (100%)
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java
 rename plugins/repository-s3/{ => qa/amazon-s3}/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java (100%)
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml
 create mode 100644 plugins/repository-s3/qa/build.gradle

diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index 46988a2dd51..23252881cd7 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -1,5 +1,3 @@
-import org.elasticsearch.gradle.test.AntFixture
-
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements.
See the NOTICE file distributed with @@ -66,28 +64,14 @@ test { exclude '**/*CredentialsTests.class' } -forbiddenApisTest { - // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage - bundledSignatures -= 'jdk-non-portable' - bundledSignatures += 'jdk-internal' -} - -/** A task to start the AmazonS3Fixture which emulates a S3 service **/ -task s3Fixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, 'bucket_test' +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:amazon-s3:check' } integTestCluster { - dependsOn s3Fixture - keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key" keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key" - - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 's3.client.integration_test.endpoint', "http://${ -> s3Fixture.addressAndPort }" } thirdPartyAudit.excludes = [ diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle new file mode 100644 index 00000000000..5e288899021 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/build.gradle @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:repository-s3', configuration: 'runtime') +} + +integTestCluster { + plugin ':plugins:repository-s3' +} + +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +boolean useFixture = false + +String s3AccessKey = System.getenv("amazon_s3_access_key") +String s3SecretKey = System.getenv("amazon_s3_secret_key") +String s3Bucket = System.getenv("amazon_s3_bucket") +String s3BasePath = System.getenv("amazon_s3_base_path") + +if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { + s3AccessKey = 's3_integration_test_access_key' + s3SecretKey = 's3_integration_test_secret_key' + s3Bucket = 'bucket_test' + s3BasePath = 'integration_test' + useFixture = true +} + +/** A task to start the AmazonS3Fixture which emulates a S3 service **/ +task s3Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket +} + +Map expansions = [ + 'bucket': s3Bucket, + 'base_path': s3BasePath +] +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + keystoreSetting 's3.client.integration_test.access_key', s3AccessKey + keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey + + if (useFixture) { + dependsOn s3Fixture + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}" + } else { + println "Using an external service to test the repository-s3 plugin" + } +} \ No newline at end of file diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java similarity index 100% rename from plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java rename to plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java new file mode 100644 index 00000000000..afcc0fa3534 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java similarity index 100% rename from plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java rename to plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml new file mode 100644 index 00000000000..8b3daccf0a2 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml @@ -0,0 +1,183 @@ +# Integration tests for repository-s3 +--- +"Snapshot/Restore with repository-s3": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: s3 + settings: + bucket: ${bucket} + client: integration_test + base_path: ${base_path} + canned_acl: private + storage_class: standard + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: { repository.settings.bucket : ${bucket} } + - match: { repository.settings.client : "integration_test" } + - match: { repository.settings.base_path : ${base_path} } + - match: { repository.settings.canned_acl : "private" } + - match: { repository.settings.storage_class : "standard" } + - is_false: repository.settings.access_key + - is_false: repository.settings.secret_key + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - 
is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-s3/qa/build.gradle b/plugins/repository-s3/qa/build.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 09d9782aa91..cb4f977bae7 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -156,7 +156,7 @@ class S3Repository extends BlobStoreRepository { String bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway"); + throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); } boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml index 11f4610f6f7..7bb65a50886 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml @@ -11,183 +11,3 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-s3 } ---- -"Snapshot/Restore with repository-s3": - - # Register repository - - do: - snapshot.create_repository: - repository: repository - body: - type: s3 - settings: - bucket: "bucket_test" - client: "integration_test" - canned_acl: "public-read" - storage_class: "standard" - - - match: { acknowledged: true } 
- - # Get repository - - do: - snapshot.get_repository: - repository: repository - - - match: {repository.settings.bucket : "bucket_test"} - - match: {repository.settings.client : "integration_test"} - - match: {repository.settings.canned_acl : "public-read"} - - match: {repository.settings.storage_class : "standard"} - - is_false: repository.settings.access_key - - is_false: repository.settings.secret_key - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 1 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 2 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 3 - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 4 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 5 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 6 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 7 - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-one - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index d60216dad19..602dfa2d6ea 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -23,9 +23,9 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 -project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj -> +project(':plugins').getChildProjects().each { pluginName, pluginProject -> integTestCluster { - plugin subproj.path + plugin pluginProject.path } pluginsCount += 1 } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle 
index 4086cf22057..2b1ffb28081 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -22,7 +22,7 @@ apply plugin: 'elasticsearch.vagrant'

 List plugins = []
 for (Project subproj : project.rootProject.subprojects) {
-  if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) {
+  if (subproj.parent.path == ':plugins' || subproj.path.equals(':example-plugins:custom-settings')) {
     // add plugin as a dep
     dependencies {
       packaging project(path: "${subproj.path}", configuration: 'zip')

diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
index bc7aa9fd393..28fd4d2db49 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle
+++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
@@ -1,5 +1,7 @@
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 import org.elasticsearch.gradle.test.NodeInfo

 import javax.net.ssl.HttpsURLConnection
@@ -160,9 +162,9 @@ integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCert

 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(subproj.path)
+  integTestCluster.plugin(pluginProject.path)
   pluginsCount += 1
 }

diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle
index 8c232bc5f3a..207fa8204db 100644
--- a/x-pack/qa/smoke-test-plugins/build.gradle
+++ b/x-pack/qa/smoke-test-plugins/build.gradle
@@ -1,4 +1,6 @@
 import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin

 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
@@ -8,9 +10,9 @@ dependencies {
 }

 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(subproj.path)
+  integTestCluster.plugin(pluginProject.path)
   pluginsCount += 1
 }

From fee000a37fb12f6cc92f10a8f5cc402badec696d Mon Sep 17 00:00:00 2001
From: Zachary Tong
Date: Fri, 27 Apr 2018 09:24:46 -0700
Subject: [PATCH 05/68] [TEST] Redirect links to new locations (#30179)

We had a number of `AwaitsFix` links that weren't updated after the x-pack
merge. Where possible I changed the links to the new locations, but in some
circumstances the original ticket was closed (suggesting the `AwaitsFix`
should be removed) or the status was otherwise unclear.
--- .../xpack/monitoring/integration/MonitoringIT.java | 2 +- .../execution/search/extractor/FieldHitExtractorTests.java | 2 +- .../test/resources/rest-api-spec/test/upgrade/10_basic.yml | 2 +- .../xpack/watcher/test/integration/BootStrapTests.java | 4 ++-- .../src/test/java/org/elasticsearch/test/OpenLdapTests.java | 2 +- .../elasticsearch/smoketest/MonitoringWithWatcherRestIT.java | 2 +- .../org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java | 2 +- .../authc/ldap/ActiveDirectorySessionFactoryTests.java | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index fbe6c465873..a77f6bf24e9 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -187,7 +187,7 @@ public class MonitoringIT extends ESSingleNodeTestCase { * This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents * have been indexed with the expected information. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/4150") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29880") @SuppressWarnings("unchecked") public void testMonitoringService() throws Exception { final boolean createAPMIndex = randomBoolean(); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 316bc4b2bf4..de36969898c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -49,7 +49,7 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index 1e3a9e43a10..e47727f5d10 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -145,7 +145,7 @@ public class BootStrapTests extends AbstractWatcherIntegrationTestCase { assertThat(response.getWatchesCount(), equalTo(1L)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/1915") + @AwaitsFix(bugUrl = "Supposedly fixed; https://github.com/elastic/x-pack-elasticsearch/issues/1915") public void testLoadExistingWatchesUponStartup() throws Exception { stopWatcher(); @@ -226,7 +226,7 @@ public class BootStrapTests extends AbstractWatcherIntegrationTestCase { assertSingleExecutionAndCompleteWatchHistory(numWatches, numRecords); } - @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/3437") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29846") public void testTriggeredWatchLoading() throws Exception { createIndex("output"); 
client().prepareIndex("my-index", "foo", "bar") diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java index c6e10130db7..eced8a1b39a 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -161,7 +161,7 @@ public class OpenLdapTests extends ESTestCase { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/x-plugins/issues/2849") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29758") public void testTcpTimeout() throws Exception { String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java index 3d5c8e3f9f4..d89d558f02f 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java @@ -31,7 +31,7 @@ import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule. import static org.hamcrest.Matchers.is; @TestLogging("org.elasticsearch.client:TRACE,tracer:TRACE") -@AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2920") +@AwaitsFix(bugUrl = "flaky tests") public class MonitoringWithWatcherRestIT extends ESRestTestCase { @After diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java index e970fcaa88a..8062d7af497 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java @@ -121,7 +121,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe ContentType.APPLICATION_JSON))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2074") + @AwaitsFix(bugUrl = "Unclear status, https://github.com/elastic/x-pack-elasticsearch/issues/2074") public void testTimeZone() throws IOException { String mode = randomMode(); index("{\"test\":\"2017-07-27 00:00:00\"}", diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java index 2f1aa0f5eb5..a319578072d 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java @@ -304,7 +304,7 @@ public class ActiveDirectorySessionFactoryTests extends AbstractActiveDirectoryT } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/3369") + @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/29840") public void testHandlingLdapReferralErrors() throws Exception { String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; From dd666599f7d3450496cdbdc29094c21792dc52a8 Mon Sep 17 00:00:00 2001 From: Sue Gallagher <36747279+Sue-Gallagher@users.noreply.github.com> Date: Fri, 27 Apr 2018 09:29:27 -0700 Subject: [PATCH 06/68] [DOCS] Added 'on a single shard' to description of max_thread_count. Closes 28518 (#29686) --- docs/reference/index-modules/merge.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc index 97db09ba656..cc0613ec287 100644 --- a/docs/reference/index-modules/merge.asciidoc +++ b/docs/reference/index-modules/merge.asciidoc @@ -23,7 +23,8 @@ The merge scheduler supports the following _dynamic_ setting: `index.merge.scheduler.max_thread_count`:: - The maximum number of threads that may be merging at once. Defaults to + The maximum number of threads on a single shard that may be merging at once. + Defaults to `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))` which works well for a good solid-state-disk (SSD). If your index is on spinning platter drives instead, decrease this to 1. From d633130e1bbc6ad3965b0c7d006dd62bef0b6629 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 27 Apr 2018 09:47:11 -0700 Subject: [PATCH 07/68] Convert FieldCapabilitiesResponse to a ToXContentObject. (#30182) --- .../action/fieldcaps/FieldCapabilitiesResponse.java | 9 +++++---- .../rest/action/RestFieldCapabilitiesAction.java | 13 +------------ .../fieldcaps/FieldCapabilitiesResponseTests.java | 4 +--- 3 files changed, 7 insertions(+), 19 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 5e2202ac073..959b4e572b7 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParserUtils; @@ -41,7 +41,7 @@ import java.util.stream.Collectors; /** * Response for {@link FieldCapabilitiesRequest} requests. 
*/ -public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentFragment { +public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentObject { private static final ParseField FIELDS_FIELD = new ParseField("fields"); private Map<String, Map<String, FieldCapabilities>> responseMap; @@ -123,8 +123,9 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(FIELDS_FIELD.getPreferredName(), responseMap); - return builder; + return builder.startObject() + .field(FIELDS_FIELD.getPreferredName(), responseMap) + .endObject(); } public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java index b2aac8d50ea..4c477334265 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -81,17 +81,6 @@ public class RestFieldCapabilitiesAction extends BaseRestHandler { fieldRequest.indicesOptions( IndicesOptions.fromRequest(request, fieldRequest.indicesOptions()) ); - return channel -> client.fieldCaps(fieldRequest, - new RestBuilderListener<FieldCapabilitiesResponse>(channel) { - @Override - public RestResponse buildResponse(FieldCapabilitiesResponse response, - XContentBuilder builder) throws Exception { - RestStatus status = OK; - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(status, builder); - } - }); + return channel -> client.fieldCaps(fieldRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index c8bd5d5188b..61556fd9b28 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -110,10 +110,8 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe public void testToXContent() throws IOException { FieldCapabilitiesResponse response = createSimpleResponse(); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON) - .startObject(); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); response.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); String generatedResponse = BytesReference.bytes(builder).utf8ToString(); assertEquals(( From 912fbb2211e4d04d305491de6e875c3e44d9475a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 14:04:37 -0400 Subject: [PATCH 08/68] Reindex: Fold "from old" tests into reindex module (#30142) This folds the `:qa:reindex-from-old` project into the `:modules:reindex` project. This should speed up the build marginally by removing a single cluster startup at the cost of having to wait for old versions of Elasticsearch to start up when checking reindex's integration tests. Those don't take that long so this feels worth it. 
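The moved test keeps itself skippable on platforms where the old-version fixtures cannot run: the build sets a `tests.fromOld` system property and the test bails out early. The guard, excerpted from the change below:

```
// Skip (rather than fail) when the build did not start the old-ES fixtures;
// the build sets tests.fromOld to "false" on Windows.
boolean enabled = Booleans.parseBoolean(System.getProperty("tests.fromOld"));
assumeTrue("test is disabled, probably because this is windows", enabled);
```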
--- modules/reindex/build.gradle | 62 ++++++++++++ .../remote}/ReindexFromOldRemoteIT.java | 6 +- qa/reindex-from-old/build.gradle | 94 ------------------- 3 files changed, 67 insertions(+), 95 deletions(-) rename {qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest => modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote}/ReindexFromOldRemoteIT.java (95%) delete mode 100644 qa/reindex-from-old/build.gradle diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 479fe78cc80..f34f4cf52e0 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -17,6 +17,10 @@ * under the License. */ +import org.apache.tools.ant.taskdefs.condition.Os + +import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + apply plugin: 'elasticsearch.test-with-dependencies' esplugin { @@ -60,3 +64,61 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Hierarchy', 'org.apache.log.Logger', ] + +// Support for testing reindex-from-remote against old Elasticsearch versions +configurations { + oldesFixture + es2 + es1 + es090 +} + +dependencies { + oldesFixture project(':test:fixtures:old-elasticsearch') + /* Right now we just test against the latest version of each major we expect + * reindex-from-remote to work against. We could randomize the versions but + * that doesn't seem worth it at this point. */ + es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' + es1 'org.elasticsearch:elasticsearch:1.7.6@zip' + es090 'org.elasticsearch:elasticsearch:0.90.13@zip' +} + +if (Os.isFamily(Os.FAMILY_WINDOWS)) { + // we can't get the pid files in windows so we skip reindex-from-old + integTestRunner.systemProperty "tests.fromOld", "false" +} else { + integTestRunner.systemProperty "tests.fromOld", "true" + /* Set up tasks to unzip and run the old versions of ES before running the + * integration tests. */ + for (String version : ['2', '1', '090']) { + Task unzip = task("unzipEs${version}", type: Sync) { + Configuration oldEsDependency = configurations['es' + version] + dependsOn oldEsDependency + /* Use a closure here to delay resolution of the dependency until we need + * it */ + from { + oldEsDependency.collect { zipTree(it) } + } + into temporaryDir + } + Task fixture = task("oldEs${version}Fixture", + type: org.elasticsearch.gradle.test.AntFixture) { + dependsOn project.configurations.oldesFixture + dependsOn unzip + executable = new File(project.runtimeJavaHome, 'bin/java') + env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" + env 'JAVA_HOME', getJavaHome(it, 7) + args 'oldes.OldElasticsearch', + baseDir, + unzip.temporaryDir, + version == '090' + } + integTest.dependsOn fixture + integTestRunner { + /* Use a closure on the string to delay evaluation until right before we + * run the integration tests so that we can be sure that the file is + * ready. 
*/ + systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" + } + } +} diff --git a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java similarity index 95% rename from qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java index 459aff34397..5d359053a66 100644 --- a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.smoketest; +package org.elasticsearch.index.reindex.remote; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -27,6 +27,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; @@ -38,6 +39,9 @@ import static org.hamcrest.Matchers.containsString; public class ReindexFromOldRemoteIT extends ESRestTestCase { private void oldEsTestCase(String portPropertyName, String requestsPerSecond) throws IOException { + boolean enabled = Booleans.parseBoolean(System.getProperty("tests.fromOld")); + assumeTrue("test is disabled, probably because this is windows", enabled); + int oldEsPort = Integer.parseInt(System.getProperty(portPropertyName)); try (RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()) { try { diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle deleted file mode 100644 index 8da714dd627..00000000000 --- a/qa/reindex-from-old/build.gradle +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -description = """\ -Tests reindex-from-remote against some specific versions of -Elasticsearch prior to 5.0. Versions of Elasticsearch >= 5.0 -should be able to use the standard launching mechanism which -is more flexible and reliable. -""" - - -import org.apache.tools.ant.taskdefs.condition.Os - -import static org.elasticsearch.gradle.BuildPlugin.getJavaHome - -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -integTestCluster { - // Whitelist reindexing from the local node so we can test it. 
- setting 'reindex.remote.whitelist', '127.0.0.1:*' -} - -configurations { - oldesFixture - es2 - es1 - es090 -} - -dependencies { - oldesFixture project(':test:fixtures:old-elasticsearch') - /* Right now we just test against the latest version of each major we expect - * reindex-from-remote to work against. We could randomize the versions but - * that doesn't seem worth it at this point. */ - es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' - es1 'org.elasticsearch:elasticsearch:1.7.6@zip' - es090 'org.elasticsearch:elasticsearch:0.90.13@zip' -} - -if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // we can't get the pid files in windows so we skip that - integTest.enabled = false -} else { - /* Set up tasks to unzip and run the old versions of ES before running the - * integration tests. */ - for (String version : ['2', '1', '090']) { - Task unzip = task("unzipEs${version}", type: Sync) { - Configuration oldEsDependency = configurations['es' + version] - dependsOn oldEsDependency - /* Use a closure here to delay resolution of the dependency until we need - * it */ - from { - oldEsDependency.collect { zipTree(it) } - } - into temporaryDir - } - Task fixture = task("oldEs${version}Fixture", - type: org.elasticsearch.gradle.test.AntFixture) { - dependsOn project.configurations.oldesFixture - dependsOn unzip - executable = new File(project.runtimeJavaHome, 'bin/java') - env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', getJavaHome(it, 7) - args 'oldes.OldElasticsearch', - baseDir, - unzip.temporaryDir, - version == '090' - } - integTest.dependsOn fixture - integTestRunner { - /* Use a closure on the string to delay evaluation until right before we - * run the integration tests so that we can be sure that the file is - * ready. */ - systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" - } - } -} From f4ed9026983d4446ded243872f5876adf127c8d3 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 14:19:00 -0400 Subject: [PATCH 09/68] CCS: Drop http address from remote cluster info (#29568) They are expensive to fetch and no longer needed by Kibana so they *shouldn't* be needed by anyone else either. Closes #29207 --- docs/reference/cluster/remote-info.asciidoc | 3 - .../release-notes/7.0.0-alpha1.asciidoc | 4 + .../test/multi_cluster/20_info.yml | 5 -- .../remote/TransportRemoteInfoAction.java | 5 +- .../cluster/RestRemoteClusterInfoAction.java | 13 +-- .../transport/RemoteClusterConnection.java | 66 ++------------- .../transport/RemoteClusterService.java | 16 +--- .../transport/RemoteConnectionInfo.java | 67 ++++++++------- .../RemoteClusterConnectionTests.java | 83 +++---------------- .../test/multi_cluster/20_info.yml | 6 -- 10 files changed, 71 insertions(+), 197 deletions(-) diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index d044f4dcad2..3dfcc201e7a 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -19,9 +19,6 @@ the configured remote cluster alias. `seeds`:: The configured initial seed transport addresses of the remote cluster. -`http_addresses`:: - The published http addresses of all connected remote nodes. - `connected`:: True if there is at least one connection to the remote cluster. 
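With `http_addresses` removed, a `_remote/info` response contains only the fields above. A sketch of the trimmed shape (field names match the assertions in the serialization tests further down; the values here are illustrative):

```
{
  "my_remote_cluster": {
    "seeds": [ "127.0.0.1:9300" ],
    "connected": true,
    "num_nodes_connected": 1,
    "max_connections_per_cluster": 3,
    "initial_connect_timeout": "30s",
    "skip_unavailable": false
  }
}
```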
diff --git a/docs/reference/release-notes/7.0.0-alpha1.asciidoc b/docs/reference/release-notes/7.0.0-alpha1.asciidoc index 618d9e70dcb..1cc328f1659 100644 --- a/docs/reference/release-notes/7.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha1.asciidoc @@ -10,5 +10,9 @@ The changes listed below have been released for the first time in Elasticsearch Core:: * Tribe node has been removed in favor of Cross-Cluster-Search +Cross-Cluster-Search:: +* `http_addresses` has been removed from the <> API + because it is expensive to fetch and no longer needed by Kibana. + Rest API:: * The Clear Cache API only supports `POST` as HTTP method diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 04086c2f2cb..b4487e4fefe 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -7,7 +7,6 @@ - match: { my_remote_cluster.num_nodes_connected: 1} - match: { my_remote_cluster.max_connections_per_cluster: 1} - match: { my_remote_cluster.initial_connect_timeout: "30s" } - - is_true: my_remote_cluster.http_addresses.0 --- "Add transient remote cluster based on the preset cluster and check remote info": @@ -38,9 +37,6 @@ - do: cluster.remote_info: {} - - set: { my_remote_cluster.http_addresses.0: remote_http } - - match: { test_remote_cluster.http_addresses.0: $remote_http } - - match: { test_remote_cluster.connected: true } - match: { my_remote_cluster.connected: true } @@ -132,4 +128,3 @@ transient: search.remote.remote1.seeds: null search.remote.remote1.skip_unavailable: null - diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java index 0410f920c8a..36974633559 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import static java.util.stream.Collectors.toList; + public final class TransportRemoteInfoAction extends HandledTransportAction { private final RemoteClusterService remoteClusterService; @@ -45,7 +47,6 @@ public final class TransportRemoteInfoAction extends HandledTransportAction listener) { - remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos - -> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure)); + listener.onResponse(new RemoteInfoResponse(remoteClusterService.getRemoteConnectionInfos().collect(toList()))); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java index 75baf8cecaa..c17be138df1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.rest.RestRequest; import 
org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -50,16 +51,8 @@ public final class RestRemoteClusterInfoAction extends BaseRestHandler { } @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) - throws IOException { - return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), - new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(RemoteInfoResponse response, XContentBuilder builder) throws Exception { - response.toXContent(builder, request); - return new BytesRestResponse(RestStatus.OK, builder); - } - }); + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), new RestToXContentListener<>(channel)); } @Override public boolean canTripCircuitBreaker() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index fb4586d201b..f24a1a928d5 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -602,66 +603,13 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo } /** - * Fetches connection info for this connection + * Get the information about remote nodes to be rendered on {@code _remote/info} requests. */ - public void getConnectionInfo(ActionListener listener) { - final Optional anyNode = connectedNodes.getAny(); - if (anyNode.isPresent() == false) { - // not connected we return immediately - RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias, - Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0, - RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); - listener.onResponse(remoteConnectionStats); - } else { - NodesInfoRequest request = new NodesInfoRequest(); - request.clear(); - request.http(true); - - transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler() { - @Override - public NodesInfoResponse newInstance() { - return new NodesInfoResponse(); - } - - @Override - public void handleResponse(NodesInfoResponse response) { - Collection httpAddresses = new HashSet<>(); - for (NodeInfo info : response.getNodes()) { - if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) { - httpAddresses.add(info.getHttp().getAddress().publishAddress()); - } - } - - if (httpAddresses.size() < maxNumRemoteConnections) { - // just in case non of the connected nodes have http enabled we get other http enabled nodes instead. 
- for (NodeInfo info : response.getNodes()) { - if (nodePredicate.test(info.getNode()) && info.getHttp() != null) { - httpAddresses.add(info.getHttp().getAddress().publishAddress()); - } - if (httpAddresses.size() == maxNumRemoteConnections) { - break; // once we have enough return... - } - } - } - RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, - seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()), new ArrayList<>(httpAddresses), - maxNumRemoteConnections, connectedNodes.size(), - RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); - listener.onResponse(remoteConnectionInfo); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - + public RemoteConnectionInfo getConnectionInfo() { + List seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()); + TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); + return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(), + initialConnectionTimeout, skipUnavailable); } int getNumNodesConnected() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index f4545713017..5de0d5e62dd 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -42,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -56,6 +54,7 @@ import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.settings.Setting.boolSetting; @@ -348,17 +347,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl IOUtils.close(remoteClusters.values()); } - public void getRemoteConnectionInfos(ActionListener> listener) { - final Map remoteClusters = this.remoteClusters; - if (remoteClusters.isEmpty()) { - listener.onResponse(Collections.emptyList()); - } else { - final GroupedActionListener actionListener = new GroupedActionListener<>(listener, - remoteClusters.size(), Collections.emptyList()); - for (RemoteClusterConnection connection : remoteClusters.values()) { - connection.getConnectionInfo(actionListener); - } - } + public Stream getRemoteConnectionInfos() { + return remoteClusters.values().stream().map(RemoteClusterConnection::getConnectionInfo); } /** diff --git 
a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index cb51f7edce5..60067e18573 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -27,17 +27,18 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import static java.util.Collections.emptyList; + import java.io.IOException; import java.util.List; import java.util.Objects; /** * This class encapsulates all remote cluster information to be rendered on - * _remote/info requests. + * {@code _remote/info} requests. */ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable { final List seedNodes; - final List httpAddresses; final int connectionsPerCluster; final TimeValue initialConnectionTimeout; final int numNodesConnected; @@ -45,12 +46,10 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable final boolean skipUnavailable; RemoteConnectionInfo(String clusterAlias, List seedNodes, - List httpAddresses, int connectionsPerCluster, int numNodesConnected, TimeValue initialConnectionTimeout, boolean skipUnavailable) { this.clusterAlias = clusterAlias; this.seedNodes = seedNodes; - this.httpAddresses = httpAddresses; this.connectionsPerCluster = connectionsPerCluster; this.numNodesConnected = numNodesConnected; this.initialConnectionTimeout = initialConnectionTimeout; @@ -59,16 +58,45 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable public RemoteConnectionInfo(StreamInput input) throws IOException { seedNodes = input.readList(TransportAddress::new); - httpAddresses = input.readList(TransportAddress::new); + if (input.getVersion().before(Version.V_7_0_0_alpha1)) { + /* + * Versions before 7.0 sent the HTTP addresses of all nodes in the + * remote cluster here but it was expensive to fetch and we + * ultimately figured out how to do without it. So we removed it. + * + * We just throw any HTTP addresses received here on the floor + * because we don't need to do anything with them. + */ + input.readList(TransportAddress::new); + } connectionsPerCluster = input.readVInt(); initialConnectionTimeout = input.readTimeValue(); numNodesConnected = input.readVInt(); clusterAlias = input.readString(); - if (input.getVersion().onOrAfter(Version.V_6_1_0)) { - skipUnavailable = input.readBoolean(); - } else { - skipUnavailable = false; + skipUnavailable = input.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(seedNodes); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + /* + * Versions before 7.0 sent the HTTP addresses of all nodes in the + * remote cluster here but it was expensive to fetch and we + * ultimately figured out how to do without it. So we removed it. + * + * When sending this request to a node that expects HTTP addresses + * here we pretend that we didn't find any. This *should* be fine + * because, after all, we haven't been using this information for + * a while. 
+ */ + out.writeList(emptyList()); } + out.writeVInt(connectionsPerCluster); + out.writeTimeValue(initialConnectionTimeout); + out.writeVInt(numNodesConnected); + out.writeString(clusterAlias); + out.writeBoolean(skipUnavailable); } @Override @@ -80,11 +108,6 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable builder.value(addr.toString()); } builder.endArray(); - builder.startArray("http_addresses"); - for (TransportAddress addr : httpAddresses) { - builder.value(addr.toString()); - } - builder.endArray(); builder.field("connected", numNodesConnected > 0); builder.field("num_nodes_connected", numNodesConnected); builder.field("max_connections_per_cluster", connectionsPerCluster); @@ -95,19 +118,6 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable return builder; } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeList(seedNodes); - out.writeList(httpAddresses); - out.writeVInt(connectionsPerCluster); - out.writeTimeValue(initialConnectionTimeout); - out.writeVInt(numNodesConnected); - out.writeString(clusterAlias); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(skipUnavailable); - } - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -116,7 +126,6 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable return connectionsPerCluster == that.connectionsPerCluster && numNodesConnected == that.numNodesConnected && Objects.equals(seedNodes, that.seedNodes) && - Objects.equals(httpAddresses, that.httpAddresses) && Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) && Objects.equals(clusterAlias, that.clusterAlias) && skipUnavailable == that.skipUnavailable; @@ -124,7 +133,7 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable @Override public int hashCode() { - return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, + return Objects.hash(seedNodes, connectionsPerCluster, initialConnectionTimeout, numNodesConnected, clusterAlias, skipUnavailable); } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 0d8a4699819..69096677664 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -717,22 +718,6 @@ public class RemoteClusterConnectionTests extends ESTestCase { } } - private static void installNodeStatsHandler(TransportService service, DiscoveryNode...nodes) { - service.registerRequestHandler(NodesInfoAction.NAME, NodesInfoRequest::new, ThreadPool.Names.SAME, false, false, - (request, channel) -> { - List nodeInfos = new ArrayList<>(); - int port = 80; - for (DiscoveryNode node : nodes) { - HttpInfo http = new HttpInfo(new BoundTransportAddress(new TransportAddress[]{node.getAddress()}, - new 
TransportAddress(node.getAddress().address().getAddress(), port++)), 100); - nodeInfos.add(new NodeInfo(node.getVersion(), Build.CURRENT, node, null, null, null, null, null, null, http, null, - null, null)); - } - channel.sendResponse(new NodesInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList())); - }); - - } - public void testGetConnectionInfo() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService transport1 = startTransport("seed_node", knownNodes, Version.CURRENT); @@ -753,34 +738,24 @@ public class RemoteClusterConnectionTests extends ESTestCase { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, maxNumConnections, n -> true)) { + seedNodes, service, maxNumConnections, n -> true)) { // test no nodes connected - RemoteConnectionInfo remoteConnectionInfo = assertSerialization(getRemoteConnectionInfo(connection)); + RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); assertEquals(0, remoteConnectionInfo.numNodesConnected); - assertEquals(0, remoteConnectionInfo.seedNodes.size()); - assertEquals(0, remoteConnectionInfo.httpAddresses.size()); + assertEquals(3, remoteConnectionInfo.seedNodes.size()); assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); + + // Connect some nodes updateSeedNodes(connection, seedNodes); - expectThrows(RemoteTransportException.class, () -> getRemoteConnectionInfo(connection)); - - for (MockTransportService s : Arrays.asList(transport1, transport2, transport3)) { - installNodeStatsHandler(s, node1, node2, node3); - } - - remoteConnectionInfo = getRemoteConnectionInfo(connection); - remoteConnectionInfo = assertSerialization(remoteConnectionInfo); + remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); assertEquals(connection.getNumNodesConnected(), remoteConnectionInfo.numNodesConnected); assertEquals(Math.min(3, maxNumConnections), connection.getNumNodesConnected()); assertEquals(3, remoteConnectionInfo.seedNodes.size()); - assertEquals(remoteConnectionInfo.httpAddresses.size(), Math.min(3, maxNumConnections)); assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); - for (TransportAddress address : remoteConnectionInfo.httpAddresses) { - assertTrue("port range mismatch: " + address.getPort(), address.getPort() >= 80 && address.getPort() <= 90); - } } } } @@ -789,48 +764,41 @@ public class RemoteClusterConnectionTests extends ESTestCase { public void testRemoteConnectionInfo() throws IOException { RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats); RemoteConnectionInfo stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 4, TimeValue.timeValueMinutes(30), true); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new 
RemoteConnectionInfo("test_cluster_1", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 15)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 87)), 4, 3, TimeValue.timeValueMinutes(30), true); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(325), true); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 5, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats1); assertNotEquals(stats, stats1); @@ -850,13 +818,14 @@ public class RemoteClusterConnectionTests extends ESTestCase { } public void testRemoteConnectionInfoBwComp() throws IOException { - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0); + final Version version = VersionUtils.randomVersionBetween(random(), + Version.V_6_1_0, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1)); RemoteConnectionInfo expected = new RemoteConnectionInfo("test_cluster", Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 4, new TimeValue(30, TimeUnit.MINUTES), false); - String encoded = "AQQAAAAABzAuMC4wLjAAAAABAQQAAAAABzAuMC4wLjAAAABQBDwEBAx0ZXN0X2NsdXN0ZXIAAAAAAAAAAAAAAA=="; + // This version was created using the serialization code in use from 6.1 but before 7.0 + String encoded = "AQQAAAAABzAuMC4wLjAAAAABAQQAAAAABzAuMC4wLjAAAABQBDwEBAx0ZXN0X2NsdXN0ZXIA"; final byte[] data = Base64.getDecoder().decode(encoded); try (StreamInput in = StreamInput.wrap(data)) { @@ -879,55 +848,29 @@ public class RemoteClusterConnectionTests extends ESTestCase { public void testRenderConnectionInfoXContent() throws IOException { RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80)), 4, 3, TimeValue.timeValueMinutes(30), true); stats = assertSerialization(stats); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); stats.toXContent(builder, null); builder.endObject(); - assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"http_addresses\":[\"0.0.0.0:80\"],\"connected\":true," + + assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"connected\":true," + "\"num_nodes_connected\":3,\"max_connections_per_cluster\":4,\"initial_connect_timeout\":\"30m\"," + "\"skip_unavailable\":true}}", Strings.toString(builder)); stats = new 
RemoteConnectionInfo("some_other_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1), new TransportAddress(TransportAddress.META_ADDRESS,2)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80), new TransportAddress(TransportAddress.META_ADDRESS,81)), 2, 0, TimeValue.timeValueSeconds(30), false); stats = assertSerialization(stats); builder = XContentFactory.jsonBuilder(); builder.startObject(); stats.toXContent(builder, null); builder.endObject(); - assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],\"http_addresses\":[\"0.0.0.0:80\",\"0.0.0.0:81\"]," + assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"]," + "\"connected\":false,\"num_nodes_connected\":0,\"max_connections_per_cluster\":2,\"initial_connect_timeout\":\"30s\"," + "\"skip_unavailable\":false}}", Strings.toString(builder)); } - private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception { - AtomicReference statsRef = new AtomicReference<>(); - AtomicReference exceptionRef = new AtomicReference<>(); - CountDownLatch latch = new CountDownLatch(1); - connection.getConnectionInfo(new ActionListener() { - @Override - public void onResponse(RemoteConnectionInfo remoteConnectionInfo) { - statsRef.set(remoteConnectionInfo); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - exceptionRef.set(e); - latch.countDown(); - } - }); - latch.await(); - if (exceptionRef.get() != null) { - throw exceptionRef.get(); - } - return statsRef.get(); - } - public void testEnsureConnected() throws IOException, InterruptedException { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index cf374db6a3b..5ff92df69b8 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -41,8 +41,6 @@ teardown: - match: { my_remote_cluster.num_nodes_connected: 1} - match: { my_remote_cluster.max_connections_per_cluster: 1} - match: { my_remote_cluster.initial_connect_timeout: "30s" } - - is_true: my_remote_cluster.http_addresses.0 - --- "Add transient remote cluster based on the preset cluster and check remote info": @@ -70,9 +68,6 @@ teardown: - do: headers: { Authorization: "Basic am9lOnMza3JpdA==" } cluster.remote_info: {} - - set: { my_remote_cluster.http_addresses.0: remote_http } - - match: { test_remote_cluster.http_addresses.0: $remote_http } - - match: { test_remote_cluster.connected: true } - match: { my_remote_cluster.connected: true } @@ -87,4 +82,3 @@ teardown: - match: { my_remote_cluster.initial_connect_timeout: "30s" } - match: { test_remote_cluster.initial_connect_timeout: "30s" } - From 8401eac425d0440c0e860fb14cd112be2f8c01df Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 15:01:42 -0400 Subject: [PATCH 10/68] Test: Switch painless test to 1 shard We *think* that #28600 is caused by warnings not being collected during one of the fan out phases of search but we're not 100% sure how this is happening. 
This commit drops the number of shards used for the test to 1 so there *isn't* a fan out phase. If this makes the issue go away we'll have more information. --- .../rest-api-spec/test/painless/50_script_doc_values.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml index ce8c03afec6..ede2927b992 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -3,6 +3,8 @@ setup: indices.create: index: test body: + settings: + number_of_shards: 1 mappings: test: properties: From 962c965812774eb48388209eeefe55eef0d8a487 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 27 Apr 2018 12:40:05 -0700 Subject: [PATCH 11/68] [DOCS] Adds native realm security settings (#30186) --- .../authentication/native-realm.asciidoc | 43 ++--------- .../en/settings/security-settings.asciidoc | 76 ++++++++++++------- 2 files changed, 54 insertions(+), 65 deletions(-) diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc index 8cd150b9c1c..997920013cd 100644 --- a/x-pack/docs/en/security/authentication/native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -1,5 +1,5 @@ [[native-realm]] -=== Native User Authentication +=== Native user authentication The easiest way to manage and authenticate users is with the internal `native` realm. You can use the REST APIs or Kibana to add and remove users, assign user roles, and @@ -7,7 +7,7 @@ manage user passwords. [[native-realm-configuration]] [float] -==== Configuring a Native Realm +==== Configuring a native realm The native realm is added to the realm chain by default. You don't need to explicitly configure a native realm to manage users through the REST APIs. @@ -47,45 +47,12 @@ xpack: . Restart Elasticsearch. [[native-settings]] -.Native Realm Settings -[cols="4,^3,10"] -|======================= -| Setting | Required | Description - -| `type` | yes | Indicates the realm type. Must be set to `native`. - -| `order` | no | Indicates the priority of this realm within - the realm chain. Realms with a lower order - are consulted first. Although not required, - we recommend explicitly setting this value - when you configure multiple realms. Defaults - to `Integer.MAX_VALUE`. - -| `enabled` | no | Indicates whether this realm is enabled or - disabled. When set to `false`, the realm is - not added to the realm chain and therefore - is inactive. Defaults to `true`. - -| `cache.ttl` | no | Specifies the time-to-live for cached user - entries. A user's credentials are cached for - this period of time. Specify the time period - using the standard Elasticsearch - {ref}/common-options.html#time-units[time units]. - Defaults to `20m`. - -| `cache.max_users` | no | Specifies the maximum number of user entries - that can be cached at any given time. Defaults - to 100,000. - -| `cache.hash_algo` | no | Specifies the hashing algorithm that is used - for the cached user credentials. See - <> - for the possible values. (Expert Setting) -|======================= +==== Native realm settings +See {ref}/ref-native-settings.html[Native Realm Settings]. 
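For quick reference, a minimal native realm using the settings the removed table documented (a sketch; the realm name `native1` is arbitrary and the cache values are the documented defaults):

```
xpack.security.authc.realms:
  native1:
    type: native
    order: 0
    cache.ttl: 20m
    cache.max_users: 100000
```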
[[managing-native-users]] -==== Managing Native Users +==== Managing native users {security} enables you to easily manage users in {kib} on the *Management / Security / Users* page. diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc index eb2299e693d..046d76784fb 100644 --- a/x-pack/docs/en/settings/security-settings.asciidoc +++ b/x-pack/docs/en/settings/security-settings.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[security-settings]] -=== Security Settings in Elasticsearch +=== Security settings in {es} ++++ -Security Settings +Security settings ++++ By default, {security} is disabled when you have a basic or trial license. To @@ -23,14 +23,14 @@ For more information about creating and updating the {es} keystore, see [float] [[general-security-settings]] -==== General Security Settings +==== General security settings `xpack.security.enabled`:: Set to `true` to enable {security} on the node. + + If set to `false`, which is the default value for basic and trial licenses, {security} is disabled. It also affects all {kib} instances that connect to this {es} instance; you do not need to disable {security} in those `kibana.yml` files. -For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} Security Settings]. +For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings]. `xpack.security.hide_settings`:: A comma-separated list of settings that are omitted from the results of the @@ -42,16 +42,16 @@ sensitive nature of the information. [float] [[password-security-settings]] -==== Default Password Security Settings +==== Default password security settings `xpack.security.authc.accept_default_password`:: In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password. [float] [[anonymous-access-settings]] -==== Anonymous Access Settings +==== Anonymous access settings You can configure the following anonymous access settings in `elasticsearch.yml`. For more information, see {xpack-ref}/anonymous-access.html[ -Enabling Anonymous Access]. +Enabling anonymous access]. `xpack.security.authc.anonymous.username`:: The username (principal) of the anonymous user. Defaults to `_es_anonymous_user`. @@ -69,12 +69,12 @@ access. Defaults to `true`. [float] [[field-document-security-settings]] -==== Document and Field Level Security Settings +==== Document and field level security settings You can set the following document and field level security settings in `elasticsearch.yml`. For more information, see -{xpack-ref}/field-and-document-access-control.html[Setting Up Document and Field -Level Security]. +{xpack-ref}/field-and-document-access-control.html[Setting up document and field +level security]. `xpack.security.dls_fls.enabled`:: Set to `false` to prevent document and field level security @@ -82,7 +82,7 @@ from being configured. Defaults to `true`. [float] [[token-service-settings]] -==== Token Service Settings +==== Token service settings You can set the following token service settings in `elasticsearch.yml`. @@ -98,7 +98,7 @@ The length of time that a token is valid for. By default this value is `20m` or [float] [[realm-settings]] -==== Realm Settings +==== Realm settings You configure realm settings in the `xpack.security.authc.realms` namespace in `elasticsearch.yml`. 
For example: @@ -124,10 +124,11 @@ xpack.security.authc.realms: ---------------------------------------- The valid settings vary depending on the realm type. For more -information, see {xpack-ref}/setting-up-authentication.html[Setting Up Authentication]. +information, see {xpack-ref}/setting-up-authentication.html[Setting up authentication]. [float] -===== Settings Valid for All Realms +[[ref-realm-settings]] +===== Settings valid for all realms `type`:: The type of the realm: `native, `ldap`, `active_directory`, `pki`, or `file`. Required. @@ -141,10 +142,31 @@ recommended when you configure multiple realms. Defaults to `Integer.MAX_VALUE`. Indicates whether a realm is enabled. You can use this setting to disable a realm without removing its configuration information. Defaults to `true`. +[[ref-native-settings]] +[float] +===== Native realm settings + +For a native realm, the `type` must be set to `native`. In addition to the +<>, you can specify +the following optional settings: + +`cache.ttl`:: The time-to-live for cached user entries. User credentials are +cached for this period of time. Specify the time period using the standard +{es} <>. Defaults to `20m`. + +`cache.max_users`:: The maximum number of user entries that can live in the +cache at any given time. Defaults to 100,000. + +`cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the +in-memory cached user credentials. For possible values, see +{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to +`ssha256`. + + [[ref-users-settings]] [float] -===== File Realm Settings +===== File realm settings `cache.ttl`:: The time-to-live for cached user entries--user credentials are cached for @@ -163,7 +185,7 @@ all possible values. Defaults to `ssha256`. [[ref-ldap-settings]] [float] -===== LDAP Realm Settings +===== LDAP realm settings `url`:: An LDAP URL in the format `ldap[s]://:`. Required. @@ -393,7 +415,7 @@ table for all possible values). Defaults to `ssha256`. [[ref-ad-settings]] [float] -===== Active Directory Realm Settings +===== Active Directory realm settings `url`:: A URL in the format `ldap[s]://:`. Defaults to `ldap://:389`. @@ -605,7 +627,7 @@ the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.ht [[ref-pki-settings]] [float] -===== PKI Realm Settings +===== PKI realm settings `username_pattern`:: The regular expression pattern used to extract the username from the @@ -651,7 +673,7 @@ Defaults to `100000`. [[ref-saml-settings]] [float] -===== SAML Realm Settings +===== SAML realm settings `idp.entity_id`:: The Entity ID of the SAML Identity Provider @@ -915,10 +937,10 @@ cipher suites that should be supported. [float] [[ssl-tls-settings]] -==== Default TLS/SSL Settings +==== Default TLS/SSL settings You can configure the following TLS/SSL settings in `elasticsearch.yml`. For more information, see -{xpack-ref}/encrypting-communications.html[Encrypting Communications]. These settings will be used +{xpack-ref}/encrypting-communications.html[Encrypting communications]. These settings will be used for all of {xpack} unless they have been overridden by more specific settings such as those for HTTP or Transport. 
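A minimal sketch of such defaults in `elasticsearch.yml`, assuming the `xpack.ssl.*` prefix used for the {xpack}-wide settings (the file paths are illustrative):

```
# Used by all of {xpack} unless a more specific HTTP or Transport setting overrides it.
xpack.ssl.key: certs/node.key
xpack.ssl.certificate: certs/node.crt
xpack.ssl.certificate_authorities: [ "certs/ca.crt" ]
```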
@@ -961,7 +983,7 @@ Jurisdiction Policy Files_ has been installed, the default value also includes ` [float] [[tls-ssl-key-settings]] -===== Default TLS/SSL Key and Trusted Certificate Settings +===== Default TLS/SSL key and trusted certificate settings The following settings are used to specify a private key, certificate, and the trusted certificates that should be used when communicating over an SSL/TLS connection. @@ -971,7 +993,7 @@ trusted along with the certificate(s) from the <> are also available for each transport profile. By default, the settings for a transport profile will be the same as the default transport unless they @@ -1096,7 +1118,7 @@ setting, this would be `transport.profiles.$PROFILE.xpack.security.ssl.key`. [float] [[ip-filtering-settings]] -==== IP Filtering Settings +==== IP filtering settings You can configure the following settings for {xpack-ref}/ip-filtering.html[IP filtering]. `xpack.security.transport.filter.allow`:: From 4494565d8e30501357b7547606322762113ab4a5 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 27 Apr 2018 16:22:27 -0400 Subject: [PATCH 12/68] Bump changelog version to 6.4 (#30217) This commit bumps the changelog version to 6.4 as now that 6.3 is feature frozen there would be no additional entries in the changelog for 6.3.0. --- docs/CHANGELOG.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 6fb989de7c1..98be1db1b6d 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -31,7 +31,7 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea === Known Issues -== Elasticsearch version 6.3.0 +== Elasticsearch version 6.4.0 === New Features From 7eaec6031d9932305e17ff15a9648c0d5b2e6f43 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 27 Apr 2018 13:24:15 -0700 Subject: [PATCH 13/68] [DOCS] Fixes broken links (#30219) --- x-pack/docs/en/security/authentication/native-realm.asciidoc | 2 +- x-pack/docs/en/security/authentication/pki-realm.asciidoc | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc index 997920013cd..1c3afdacdc5 100644 --- a/x-pack/docs/en/security/authentication/native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -49,7 +49,7 @@ xpack: [[native-settings]] ==== Native realm settings -See {ref}/ref-native-settings.html[Native Realm Settings]. +See {ref}/security-settings.html#ref-native-settings[Native Realm Settings]. [[managing-native-users]] ==== Managing native users diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc index 57cf4dbbce0..2971f6e3887 100644 --- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc @@ -128,10 +128,7 @@ The `certificate_authorities` option may be used as an alternative to the [[pki-settings]] ===== PKI Realm Settings -See -{ref}/security-settings.html#_settings_valid_for_all_realms[Security Settings for All Realms] -and -{ref}/security-settings.html#ref-pki-settings[PKI Realm Settings]. +See {ref}/security-settings.html#ref-pki-settings[PKI Realm Settings]. 
[[assigning-roles-pki]] ==== Mapping Roles for PKI Users From 9c586a2f078213771457f74541ebeffd2c895413 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 27 Apr 2018 16:45:42 -0400 Subject: [PATCH 14/68] Do not log warn shard not-available exception in replication (#30205) Since #28049, only fully initialized shards receive write requests. This enhancement allows us to handle all exceptions. In #28571, we started strictly handling shard-not-available exceptions and tried to preserve how we report replication errors to users by reporting only errors that are not shard-not-available exceptions. However, since then we have unintentionally logged a warning for every exception. This change restores the previous behavior, which logs a warning only if an exception is not a shard-not-available exception. Relates #28049 Relates #28571 --- .../action/support/replication/TransportWriteAction.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 2a3e8be7aa8..bdddcddaa2e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -384,7 +384,9 @@ public abstract class TransportWriteAction< @Override public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) { - logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); + if (TransportActions.isShardNotAvailableException(exception) == false) { + logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); + } shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception, createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); } From f5978d6d33cb7eb0cc4f1b4276a1109268a72914 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 27 Apr 2018 16:14:11 -0700 Subject: [PATCH 15/68] In the field capabilities API, remove support for providing fields in the request body. 
(#30185) --- docs/CHANGELOG.asciidoc | 2 + docs/reference/migration/migrate_6_4.asciidoc | 12 ---- .../migration/migrate_7_0/api.asciidoc | 7 +++ docs/reference/search/field-caps.asciidoc | 14 ----- .../rest-api-spec/api/field_caps.json | 5 +- .../fieldcaps/FieldCapabilitiesRequest.java | 5 -- .../action/RestFieldCapabilitiesAction.java | 34 ++--------- .../RestFieldCapabilitiesActionTests.java | 59 ------------------- 8 files changed, 15 insertions(+), 123 deletions(-) delete mode 100644 docs/reference/migration/migrate_6_4.asciidoc delete mode 100644 server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 98be1db1b6d..dc2092ad905 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -14,6 +14,8 @@ <> ({pull}29635[#29635]) +<> ({pull}30185[#30185]) + === Breaking Java Changes === Deprecations diff --git a/docs/reference/migration/migrate_6_4.asciidoc b/docs/reference/migration/migrate_6_4.asciidoc deleted file mode 100644 index a761509597f..00000000000 --- a/docs/reference/migration/migrate_6_4.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[breaking-changes-6.4]] -== Breaking changes in 6.4 - -[[breaking_64_api_changes]] -=== API changes - -==== Field capabilities request format - -In the past, `fields` could be provided either as a parameter, or as part of the request -body. Specifying `fields` in the request body is now deprecated, and instead they should -always be supplied through a request parameter. In 7.0.0, the field capabilities API will -not accept `fields` supplied in the request body. diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 2da822a7e5e..6a171ee63b5 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -33,3 +33,10 @@ Previously, `suggest` stats were folded into `search` stats. Support for the `suggest` metric on the indices stats and nodes stats APIs remained for backwards compatibility. Backwards support for the `suggest` metric was deprecated in 6.3.0 and now removed in 7.0.0. + +[[remove-field-caps-body]] +==== In the fields capabilities API, `fields` can no longer be provided in the request body. + +In the past, `fields` could be provided either as a parameter, or as part of the request +body. Specifying `fields` in the request body as opposed to a parameter was deprecated +in 6.4.0, and is now unsupported in 7.0.0. diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index 6cb483e7a25..1df2279c26c 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -20,20 +20,6 @@ GET twitter/_field_caps?fields=rating // CONSOLE // TEST[setup:twitter] -Alternatively the `fields` option can also be defined in the request body. deprecated[6.4.0, Please use a request parameter instead.] - -[source,js] --------------------------------------------------- -POST _field_caps -{ - "fields" : ["rating"] -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Specifying a request body is deprecated -- the [fields] request parameter should be used instead.] - -This is equivalent to the previous request. 
- Supported request options: [horizontal] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json b/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json index d993dc0545b..3ba09ca314b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json @@ -35,9 +35,6 @@ } } }, - "body": { - "description": "Field json objects containing an array of field names", - "required": false - } + "body": null } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index e91d9a703f4..636af6101ae 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Arrays; @@ -102,10 +101,6 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind } } - public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - /** * The list of field names to retrieve */ diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java index 4c477334265..4b9d37ace91 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -20,25 +20,18 @@ package org.elasticsearch.rest.action; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestFieldCapabilitiesAction extends BaseRestHandler { public RestFieldCapabilitiesAction(Settings settings, RestController controller) { @@ -57,30 +50,13 @@ public class RestFieldCapabilitiesAction extends BaseRestHandler { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - if (request.hasContentOrSourceParam()) { - deprecationLogger.deprecated("Specifying a request body is deprecated -- the" + - " [fields] request parameter should be used instead."); - if 
(request.hasParam("fields")) { - throw new IllegalArgumentException("can't specify a request body and [fields]" + - " request parameter, either specify a request body or the" + - " [fields] request parameter"); - } - } + String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + FieldCapabilitiesRequest fieldRequest = new FieldCapabilitiesRequest() + .fields(Strings.splitStringByCommaToArray(request.param("fields"))) + .indices(indices); - final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final FieldCapabilitiesRequest fieldRequest; - if (request.hasContentOrSourceParam()) { - try (XContentParser parser = request.contentOrSourceParamParser()) { - fieldRequest = FieldCapabilitiesRequest.parseFields(parser); - } - } else { - fieldRequest = new FieldCapabilitiesRequest(); - fieldRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); - } - fieldRequest.indices(indices); fieldRequest.indicesOptions( - IndicesOptions.fromRequest(request, fieldRequest.indicesOptions()) - ); + IndicesOptions.fromRequest(request, fieldRequest.indicesOptions())); return channel -> client.fieldCaps(fieldRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java deleted file mode 100644 index b8dd007f567..00000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.usage.UsageService; -import org.junit.Before; - -import java.io.IOException; -import java.util.Collections; - -import static org.mockito.Mockito.mock; - -public class RestFieldCapabilitiesActionTests extends ESTestCase { - - private RestFieldCapabilitiesAction action; - - @Before - public void setUpAction() { - action = new RestFieldCapabilitiesAction(Settings.EMPTY, mock(RestController.class)); - } - - public void testRequestBodyIsDeprecated() throws IOException { - String content = "{ \"fields\": [\"title\"] }"; - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withPath("/_field_caps") - .withContent(new BytesArray(content), XContentType.JSON) - .build(); - action.prepareRequest(request, mock(NodeClient.class)); - - assertWarnings("Specifying a request body is deprecated -- the" + - " [fields] request parameter should be used instead."); - } -} From f381e2a00ceee01876d9c01667ba4ac946fa2459 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 28 Apr 2018 00:11:17 -0400 Subject: [PATCH 16/68] Add migration note on thread pool API changes (#29192) A previous change modified the output of the thread pool info contained in the nodes info API. This commit adds a note to the migration docs for this change. --- .../migration/migrate_7_0/api.asciidoc | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 6a171ee63b5..83124584761 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -22,6 +22,32 @@ The following parameters starting with underscore have been removed: Instead of these removed parameters, use their non camel case equivalents without starting underscore, e.g. use `version_type` instead of `_version_type` or `versionType`. +==== Thread pool info + +In previous versions of Elasticsearch, the thread pool info returned in the +<> returned `min` and `max` values reflecting +the configured minimum and maximum number of threads that could be in each +thread pool. The trouble with this representation is that it does not align with +the configuration parameters used to configure thread pools. For +<>, the minimum number of threads is +configured by a parameter called `core` and the maximum number of threads is +configured by a parameter called `max`. For <>, there is only one configuration parameter along these lines and that +parameter is called `size`, reflecting the fixed number of threads in the +pool. This discrepancy between the API and the configuration parameters has been +rectified. Now, the API will report `core` and `max` for scaling thread pools, +and `size` for fixed thread pools. + +Similarly, in the cat thread pool API the existing `size` output has been +renamed to `pool_size` which reflects the number of threads currently in the +pool; the shortcut for this value has been changed from `s` to `psz`. 
The `min` +output has been renamed to `core` with a shortcut of `cr`, the shortcut for +`max` has been changed to `mx`, and the `size` output with a shortcut of `sz` +has been reused to report the configured number of threads in the pool. This +aligns the output of the API with the configuration values for thread +pools. Note that `core` and `max` will be populated for scaling thread pools, +and `size` will be populated for fixed thread pools. + ==== The parameter `fields` deprecated in 6.x has been removed from Bulk request and Update request. The Update API returns `400 - Bad request` if request contains unknown parameters (instead of ignored in the previous version). From bdde2b9824c8b3fbeca3b48ec9bbb1082a4586c2 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 28 Apr 2018 01:09:44 -0400 Subject: [PATCH 17/68] Rename request variables in shrink/split handlers (#30207) This is a code-tidying PR, a little side adventure while working on another change. Previously only the shrink request existed, but when the ability to split indices was added, shrink and split were done together under a single request object: the resize request object. However, the code inherited the legacy name in some of its variable names. This commit cleans this up. --- .../admin/indices/RestShrinkIndexAction.java | 14 +++++++------- .../action/admin/indices/RestSplitIndexAction.java | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java index be875dd0a55..9b7cd422543 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java @@ -45,12 +45,12 @@ public class RestShrinkIndexAction extends BaseRestHandler { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); - shrinkIndexRequest.setResizeType(ResizeType.SHRINK); - request.applyContentParser(shrinkIndexRequest::fromXContent); - shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); - shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); - shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel)); + final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); + resizeRequest.setResizeType(ResizeType.SHRINK); + request.applyContentParser(resizeRequest::fromXContent); + resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); + resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); + resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); + return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java index d465c4ebb74..6624fb8d9a4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java @@ -45,12 +45,12 @@ public class RestSplitIndexAction extends BaseRestHandler { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); - shrinkIndexRequest.setResizeType(ResizeType.SPLIT); - request.applyContentParser(shrinkIndexRequest::fromXContent); - shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); - shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); - shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel)); + final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); + resizeRequest.setResizeType(ResizeType.SPLIT); + request.applyContentParser(resizeRequest::fromXContent); + resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); + resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); + resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); + return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } } From 9c8e0155527b1a8debe5d928ad2359efc66aae0a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Sat, 28 Apr 2018 09:50:54 -0400 Subject: [PATCH 18/68] Build: Mostly silence warning about html4 javadoc (#30220) This *mostly* silences `javadoc`'s warning about defaulting to generating html4 files by enabling html5 file generation for the projects where that works. It didn't work in a half dozen projects, about half of which I've fixed in this PR, entirely by replacing `thing` with `{@code thing}`. There are a few remaining projects that contain javadoc with invalid html5. I'll fix those projects in a followup.
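To make the conversion concrete, here is a hedged before/after sketch of the javadoc pattern this change applies. The class below is hypothetical and only illustrates the markup swap; it is not taken from the diff:

```
/**
 * Before (html4-era markup that javadoc's html5 doclint rejects):
 *   A timeout to wait if the operation can't be performed immediately. Defaults to <tt>1m</tt>.
 *
 * After (the {@code ...} inline tag, which javadoc renders as code and accepts as valid html5):
 *   A timeout to wait if the operation can't be performed immediately. Defaults to {@code 1m}.
 */
public final class JavadocMarkupExample {
    // No behavior changes anywhere in this PR; only the documentation markup differs.
}
```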
--- .../elasticsearch/gradle/BuildPlugin.groovy | 19 +++++++++++++++++++ .../action/bulk/NoopBulkRequestBuilder.java | 7 ++++--- .../search/NoopSearchRequestBuilder.java | 8 ++++---- .../join/query/HasChildQueryBuilder.java | 2 +- .../rankeval/DiscountedCumulativeGain.java | 2 +- .../index/rankeval/RatedRequest.java | 2 +- .../IcuCollationTokenFilterFactory.java | 8 ++++---- .../IcuFoldingTokenFilterFactory.java | 2 +- .../IcuNormalizerCharFilterFactory.java | 6 +++--- .../IcuNormalizerTokenFilterFactory.java | 6 ++---- 10 files changed, 40 insertions(+), 22 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index a44b9c849d3..d59d25ddc55 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -549,6 +549,25 @@ class BuildPlugin implements Plugin { javadoc.classpath = javadoc.getClasspath().filter { f -> return classes.contains(f) == false } + /* + * Force html5 on projects that support it to silence the warning + * that `javadoc` will change its defaults in the future. + * + * But not all of our javadoc is actually valid html5. So we + * have to become valid incrementally. We only set html5 on the + * projects we have converted so that we still get the annoying + * warning on the unconverted ones. That will give us an + * incentive to convert them.... + */ + List html4Projects = [ + ':server', + ':libs:elasticsearch-core', + ':test:framework', + ':x-pack:plugin:core', + ] + if (false == html4Projects.contains(project.path)) { + javadoc.options.addBooleanOption('html5', true) + } } configureJavadocJar(project) } diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java index 1034e722e87..468152a88df 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java @@ -129,7 +129,8 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. + * Defaults to {@code 1m}. */ public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) { request.timeout(timeout); @@ -137,7 +138,8 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. + * Defaults to {@code 1m}. */ public final NoopBulkRequestBuilder setTimeout(String timeout) { request.timeout(timeout); @@ -151,4 +153,3 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public NoopSearchRequestBuilder setPreference(String preference) { @@ -188,7 +188,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder0. + * From index to start the search from. Defaults to {@code 0}. 
*/ public NoopSearchRequestBuilder setFrom(int from) { sourceBuilder().from(from); @@ -196,7 +196,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder10. + * The number of search hits to return. Defaults to {@code 10}. */ public NoopSearchRequestBuilder setSize(int size) { sourceBuilder().size(size); @@ -349,7 +349,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilderfalse. + * {@code false}. */ public NoopSearchRequestBuilder setTrackScores(boolean trackScores) { sourceBuilder().trackScores(trackScores); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index ebfeb5ab019..3381356da41 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -55,7 +55,7 @@ import java.util.Map; import java.util.Objects; /** - * A query builder for has_child query. + * A query builder for {@code has_child} query. */ public class HasChildQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "has_child"; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index 30195327798..13926d7d362 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -41,7 +41,7 @@ import static org.elasticsearch.index.rankeval.EvaluationMetric.joinHitsWithRati /** * Metric implementing Discounted Cumulative Gain. - * The `normalize` parameter can be set to calculate the normalized NDCG (set to false by default).
+ * The `normalize` parameter can be set to calculate the normalized NDCG (set to {@code false} by default).
* The optional `unknown_doc_rating` parameter can be used to specify a default rating for unlabeled documents. * @see Discounted Cumulative Gain
*/ diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 8f17c8203b7..392ce5d0633 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -192,7 +192,7 @@ public class RatedRequest implements Writeable, ToXContentObject { return Collections.unmodifiableMap(this.params); } - /** return the parameters if this request uses a template, otherwise this will be null. */ + /** return the parameters if this request uses a template, otherwise this will be {@code null}. */ public String getTemplateId() { return this.templateId; } diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java index d48714ffaba..b687f3aa1b7 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java @@ -36,12 +36,12 @@ import com.ibm.icu.util.ULocale; /** * An ICU based collation token filter. There are two ways to configure collation: - *

The first is simply specifying the locale (defaults to the default locale). The language - * parameter is the lowercase two-letter ISO-639 code. An additional country and variant + *

The first is simply specifying the locale (defaults to the default locale). The {@code language} + * parameter is the lowercase two-letter ISO-639 code. An additional {@code country} and {@code variant} * can be provided. *

The second option is to specify collation rules as defined in the - * Collation customization chapter in icu docs. The rules parameter can either embed the rules definition - * in the settings or refer to an external location (preferable located under the config location, relative to it). + * Collation customization chapter in icu docs. The {@code rules} parameter can either embed the rules definition + * in the settings or refer to an external location (preferable located under the {@code config} location, relative to it). */ public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory { diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java index 60ab831e6f1..6505e1db0f8 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java @@ -35,7 +35,7 @@ import org.elasticsearch.index.IndexSettings; * Can be filtered to handle certain characters in a specified way (see http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html) * E.g national chars that should be retained (filter : "[^åäöÅÄÖ]"). * - *

The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering. + *

The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering. * * @author kimchy (shay.banon) */ diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java index 3046d6839b9..e43e163e1a0 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java @@ -32,9 +32,9 @@ import java.io.Reader; /** * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize character. - *

The name can be used to provide the type of normalization to perform.

- *

The mode can be used to provide 'compose' or 'decompose'. Default is compose.

- *

The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.

+ *

The {@code name} can be used to provide the type of normalization to perform.

+ *

The {@code mode} can be used to provide 'compose' or 'decompose'. Default is compose.

+ *

The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.

*/ public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent { diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java index 4e8d5d70220..1ef09f86052 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java @@ -31,10 +31,8 @@ import org.elasticsearch.index.IndexSettings; /** * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens. - *

The name can be used to provide the type of normalization to perform.

- *

The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.

- * - * + *

The {@code name} can be used to provide the type of normalization to perform.

+ *

The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.

*/ public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { From 0a6312a5e68ef4e3818729c4d0b61b3a1e0e689e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sun, 29 Apr 2018 08:58:11 -0400 Subject: [PATCH 19/68] Collapse REST resize handlers (#30229) The REST resize handlers for shrink/split operations are effectively the same code with a minor difference. This commit collapses these handlers into a single base class. --- .../elasticsearch/action/ActionModule.java | 7 +-- ...ndexAction.java => RestResizeHandler.java} | 58 ++++++++++++++++--- .../admin/indices/RestSplitIndexAction.java | 56 ------------------ 3 files changed, 52 insertions(+), 69 deletions(-) rename server/src/main/java/org/elasticsearch/rest/action/admin/indices/{RestShrinkIndexAction.java => RestResizeHandler.java} (55%) delete mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 60ba0a43396..392b307a8aa 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -241,6 +241,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; +import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; @@ -270,8 +271,6 @@ import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.RestRefreshAction; import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; -import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction; -import org.elasticsearch.rest.action.admin.indices.RestSplitIndexAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction; @@ -569,8 +568,8 @@ public class ActionModule extends AbstractModule { registerHandler.accept(new RestIndexPutAliasAction(settings, restController)); registerHandler.accept(new RestIndicesAliasesAction(settings, restController)); registerHandler.accept(new RestCreateIndexAction(settings, restController)); - registerHandler.accept(new RestShrinkIndexAction(settings, restController)); - registerHandler.accept(new RestSplitIndexAction(settings, restController)); + registerHandler.accept(new RestResizeHandler.RestShrinkIndexAction(settings, restController)); + registerHandler.accept(new RestResizeHandler.RestSplitIndexAction(settings, restController)); registerHandler.accept(new RestRolloverIndexAction(settings, restController)); registerHandler.accept(new RestDeleteIndexAction(settings, restController)); registerHandler.accept(new RestCloseIndexAction(settings, restController)); diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java similarity index 55% rename from server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java rename to server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index 9b7cd422543..3d0158cf95f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -31,26 +31,66 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -public class RestShrinkIndexAction extends BaseRestHandler { - public RestShrinkIndexAction(Settings settings, RestController controller) { +public abstract class RestResizeHandler extends BaseRestHandler { + + RestResizeHandler(final Settings settings) { super(settings); - controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this); - controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this); } @Override - public String getName() { - return "shrink_index_action"; - } + public abstract String getName(); + + abstract ResizeType getResizeType(); @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); - resizeRequest.setResizeType(ResizeType.SHRINK); + resizeRequest.setResizeType(getResizeType()); request.applyContentParser(resizeRequest::fromXContent); resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } + + public static class RestShrinkIndexAction extends RestResizeHandler { + + public RestShrinkIndexAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this); + } + + @Override + public String getName() { + return "shrink_index_action"; + } + + @Override + protected ResizeType getResizeType() { + return ResizeType.SHRINK; + } + + } + + public static class RestSplitIndexAction extends RestResizeHandler { + + public RestSplitIndexAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this); + } + + @Override + public String getName() { + return "split_index_action"; + } + + @Override + protected ResizeType getResizeType() { + return ResizeType.SPLIT; + } + + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java deleted 
file mode 100644 index 6624fb8d9a4..00000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.admin.indices; - -import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeType; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; - -import java.io.IOException; - -public class RestSplitIndexAction extends BaseRestHandler { - public RestSplitIndexAction(Settings settings, RestController controller) { - super(settings); - controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this); - controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this); - } - - @Override - public String getName() { - return "split_index_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); - resizeRequest.setResizeType(ResizeType.SPLIT); - request.applyContentParser(resizeRequest::fromXContent); - resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); - resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); - resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); - } -} From a6624bb7425b900705b2c341f22bedbc02893a95 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 30 Apr 2018 09:48:03 +0200 Subject: [PATCH 20/68] [Test] Update test in SharedClusterSnapshotRestoreIT (#30200) The `testDeleteSnapshotWithMissingIndexAndShardMetadata` test uses an obsolete repository directory structure based on index names instead of UUIDs. Because it swallows exceptions when deleting test files the test never failed when the directory structure changed. This commit fixes the test to use the right directory structure and file names and to not swallow exceptions anymore. 
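In other words, shard files now live under the index UUID rather than the index name. Below is a minimal sketch of the corrected lookup, assuming (as the diff after this message suggests) that `RepositoryData#getIndices` returns a map from index name to `IndexId`, whose `getId` is the UUID used on disk:

```
// repo is the repository root Path, repository is the Repository under test,
// and snapshotInfo is the result of the create-snapshot call (all per the diff below).
Map<String, IndexId> indexIds = repository.getRepositoryData().getIndices();
Path shardZero = repo.resolve("indices")
    .resolve(indexIds.get("test-idx-1").getId()) // index UUID, not the name "test-idx-1"
    .resolve("0");                               // shard id
Files.delete(shardZero.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat"));
```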
--- .../SharedClusterSnapshotRestoreIT.java | 45 +++++++++++++------ 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index dbaf26c9657..88a83b01078 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -93,6 +93,7 @@ import org.elasticsearch.script.StoredScriptsIT; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.junit.annotations.TestLogging; +import java.io.IOException; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; @@ -1243,30 +1244,44 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("compress", false) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - createIndex("test-idx-1", "test-idx-2"); + final String[] indices = {"test-idx-1", "test-idx-2"}; + createIndex(indices); logger.info("--> indexing some data"); indexRandom(true, client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"), client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar")); logger.info("--> creating snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true).setIndices(indices).get(); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + Repository repository = service.repository("test-repo"); + + final Map indexIds = repository.getRepositoryData().getIndices(); + final Path indicesPath = repo.resolve("indices"); logger.info("--> delete index metadata and shard metadata"); - Path indices = repo.resolve("indices"); - Path testIndex1 = indices.resolve("test-idx-1"); - Path testIndex2 = indices.resolve("test-idx-2"); - Path testIndex2Shard0 = testIndex2.resolve("0"); - IOUtils.deleteFilesIgnoringExceptions(testIndex1.resolve("snapshot-test-snap-1")); - IOUtils.deleteFilesIgnoringExceptions(testIndex2Shard0.resolve("snapshot-test-snap-1")); + for (String index : indices) { + Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0"); + if (randomBoolean()) { + Files.delete(shardZero.resolve("index-0")); + } + Files.delete(shardZero.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat")); + } logger.info("--> delete snapshot"); client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class); + 
+ for (String index : indices) { + assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); + } } public void testDeleteSnapshotWithMissingMetadata() throws Exception { @@ -1420,9 +1435,13 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas logger.info("--> deleting shard level index file"); try (Stream files = Files.list(repo.resolve("indices"))) { - files.forEach(indexPath -> - IOUtils.deleteFilesIgnoringExceptions(indexPath.resolve("0").resolve("index-0")) - ); + files.forEach(indexPath -> { + try { + Files.delete(indexPath.resolve("0").resolve("index-0")); + } catch (IOException e) { + throw new RuntimeException("Failed to delete expected file", e); + } + }); } logger.info("--> creating another snapshot"); From 60b18b5510714c3951da8a79349a2f389f8cfe49 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 30 Apr 2018 13:31:14 +0200 Subject: [PATCH 21/68] Move repository-gcs fixture test to QA project (#30208) Similarly to what has been done for the repository-s3 plugin, this commit moves the fixture test into a dedicated repository-gcs/qa/google-cloud-storage project. It also exposes some environment variables which allow the integration tests to be executed against the real Google Cloud Storage service. When the environment variables are not defined, the integration tests are executed using the fixture added in #28788. Related to #29349. --- plugins/repository-gcs/build.gradle | 56 +----- plugins/repository-gcs/qa/build.gradle | 0 .../qa/google-cloud-storage/build.gradle | 115 ++++++++++++ .../gcs/GoogleCloudStorageFixture.java | 0 ...torageRepositoryClientYamlTestSuiteIT.java | 37 ++++ .../gcs/GoogleCloudStorageTestServer.java | 0 .../test/repository_gcs/10_repository.yml | 177 ++++++++++++++++++ .../test/repository_gcs/10_basic.yml | 174 ----------------- 8 files changed, 333 insertions(+), 226 deletions(-) create mode 100644 plugins/repository-gcs/qa/build.gradle create mode 100644 plugins/repository-gcs/qa/google-cloud-storage/build.gradle rename plugins/repository-gcs/{ => qa/google-cloud-storage}/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java (100%) create mode 100644 plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java rename plugins/repository-gcs/{ => qa/google-cloud-storage}/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java (100%) create mode 100644 plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index f627b7aee10..e164a8553f8 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,8 +1,3 @@ -import org.elasticsearch.gradle.test.AntFixture - -import java.security.KeyPair -import java.security.KeyPairGenerator - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -58,50 +53,7 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] -forbiddenApisTest { - // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage - bundledSignatures -= 'jdk-non-portable' - bundledSignatures += 'jdk-internal' -} - -/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ -task googleCloudStorageFixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test' -} - -/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ -File serviceAccountFile = new File(project.buildDir, "generated-resources/service_account_test.json") -task createServiceAccountFile() { - dependsOn googleCloudStorageFixture - doLast { - KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") - keyPairGenerator.initialize(1024) - KeyPair keyPair = keyPairGenerator.generateKeyPair() - String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded()) - - serviceAccountFile.parentFile.mkdirs() - serviceAccountFile.setText("{\n" + - ' "type": "service_account",\n' + - ' "project_id": "integration_test",\n' + - ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + - ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + - ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + - ' "client_id": "123456789101112130594",\n' + - " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + - " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + - ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + - ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + - '}', 'UTF-8') - } -} - -integTestCluster { - dependsOn createServiceAccountFile, googleCloudStorageFixture - keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" - - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" -} +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:google-cloud-storage:check' +} \ No newline at end of file diff --git a/plugins/repository-gcs/qa/build.gradle b/plugins/repository-gcs/qa/build.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle new file mode 100644 index 00000000000..afd49b9f4dc --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +import java.security.KeyPair +import java.security.KeyPairGenerator + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:repository-gcs', configuration: 'runtime') +} + +integTestCluster { + plugin ':plugins:repository-gcs' +} + +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +boolean useFixture = false + +String gcsServiceAccount = System.getenv("google_storage_service_account") +String gcsBucket = System.getenv("google_storage_bucket") +String gcsBasePath = System.getenv("google_storage_base_path") + +File serviceAccountFile = null +if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { + serviceAccountFile = new File(project.buildDir, 'generated-resources/service_account_test.json') + gcsBucket = 'bucket_test' + gcsBasePath = 'integration_test' + useFixture = true +} else { + serviceAccountFile = new File(gcsServiceAccount) + if (serviceAccountFile.exists() == false || serviceAccountFile.canRead() == false) { + throw new FileNotFoundException(gcsServiceAccount, "Google Storage service account file does not exist or is not readable") + } +} + +/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ +task googleCloudStorageFixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test' +} + +/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ +task createServiceAccountFile() { + dependsOn googleCloudStorageFixture + doLast { + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") + keyPairGenerator.initialize(1024) + KeyPair keyPair = keyPairGenerator.generateKeyPair() + String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded()) + + serviceAccountFile.parentFile.mkdirs() + serviceAccountFile.setText("{\n" + + ' "type": "service_account",\n' + + ' "project_id": "integration_test",\n' + + ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + + ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + + ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + + ' "client_id": "123456789101112130594",\n' + + " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + + " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + + ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + + ' "client_x509_cert_url": 
"https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + + '}', 'UTF-8') + } +} + +Map expansions = [ + 'bucket': gcsBucket, + 'base_path': gcsBasePath +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" + + if (useFixture) { + dependsOn createServiceAccountFile, googleCloudStorageFixture + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" + } else { + println "Using an external service to test the repository-gcs plugin" + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java similarity index 100% rename from plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java rename to plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java new file mode 100644 index 00000000000..8d85b389193 --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.gcs; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class GoogleCloudStorageRepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public GoogleCloudStorageRepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java similarity index 100% rename from plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java rename to plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml new file mode 100644 index 00000000000..65d02b5fade --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml @@ -0,0 +1,177 @@ +# Integration tests for repository-gcs +--- +"Snapshot/Restore with repository-gcs": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: gcs + settings: + bucket: ${bucket} + client: "integration_test" + base_path: ${base_path} + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: { repository.settings.bucket : ${bucket} } + - match: { repository.settings.client : "integration_test" } + - match: { repository.settings.base_path : ${base_path} } + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository 
+ snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml index 4f63e4b4e45..f4259771644 100644 --- a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml +++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml @@ -11,177 +11,3 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-gcs } ---- -"Snapshot/Restore with repository-gcs": - - # Register repository - - do: - snapshot.create_repository: - repository: repository - body: - type: gcs - settings: - bucket: "bucket_test" - client: "integration_test" - - - match: { acknowledged: true } - - # Get repository - - do: - snapshot.get_repository: - repository: repository - - - match: {repository.settings.bucket : "bucket_test"} - - match: {repository.settings.client : "integration_test"} - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 1 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 2 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 3 - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 4 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 5 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 6 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 7 - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two 
} - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-one - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository From 811f5b4efc517cf64571ea6390f8ef61173f06a5 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 30 Apr 2018 07:31:36 -0400 Subject: [PATCH 22/68] Do not ignore request analysis/similarity on resize (#30216) Today when a resize operation is performed, we copy the analysis, similarity, and sort settings from the source index. It is possible for the resize request to include additional index settings including analysis, similarity, and sort settings. We reject sort settings when validating the request. However, we silently ignore analysis and similarity settings on the request that are already set on the source index. Since it is possible to change the analysis and similarity settings on an existing index, this should be considered a bug and the sort of leniency that we abhor. This commit addresses this bug by allowing the request analysis/similarity settings to override the existing analysis/similarity settings on the target. --- docs/CHANGELOG.asciidoc | 2 + docs/reference/indices/shrink-index.asciidoc | 7 +- docs/reference/indices/split-index.asciidoc | 7 +- .../metadata/MetaDataCreateIndexService.java | 6 +- .../MetaDataCreateIndexServiceTests.java | 68 +++++++++++++++---- 5 files changed, 70 insertions(+), 20 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index dc2092ad905..b46799d7a8e 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -41,6 +41,8 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea === Bug Fixes +Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) + === Regressions === Known Issues diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 027cf8b924d..2dfc2b4f617 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -119,9 +119,10 @@ POST my_source_index/_shrink/my_target_index segment. -NOTE: Mappings may not be specified in the `_shrink` request, and all -`index.analysis.*` and `index.similarity.*` settings will be overwritten with -the settings from the source index. +NOTE: Mappings may not be specified in the `_shrink` request. + +NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source +index are not copied during a shrink operation. 
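For example, a hedged sketch in the YAML REST test format used elsewhere in this series, assuming the source index has already been prepared for shrinking (index names and the `whitespace` tokenizer are illustrative): with this change, an `index.analysis.*` or `index.similarity.*` setting supplied on the resize request takes precedence instead of being silently overwritten by the source index's value.

```
# sketch only: shrink while overriding an analysis setting from the request
- do:
    indices.shrink:
      index: my_source_index
      target: my_target_index
      body:
        settings:
          index.number_of_shards: 1
          index.analysis.analyzer.default.tokenizer: whitespace
```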
[float] === Monitoring the shrink process diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 4a6fc16103b..8285fa4fa44 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -175,9 +175,10 @@ POST my_source_index/_split/my_target_index number of shards in the source index. -NOTE: Mappings may not be specified in the `_split` request, and all -`index.analysis.*` and `index.similarity.*` settings will be overwritten with -the settings from the source index. +NOTE: Mappings may not be specified in the `_split` request. + +NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source +index are not copied during a split operation. [float] === Monitoring the split process diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 690cd1fbe5a..37c27fb9b7b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -56,6 +56,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; @@ -694,8 +695,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { throw new IllegalStateException("unknown resize type is " + type); } - final Predicate<String> sourceSettingsPredicate = (s) -> s.startsWith("index.similarity.") - || s.startsWith("index.analysis.") || s.startsWith("index.sort."); + final Predicate<String> sourceSettingsPredicate = + (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort.")) + && indexSettingsBuilder.keys().contains(s) == false; indexSettingsBuilder // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they // wanna add extra settings diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 6074102cde3..28fbfaefe6d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -51,6 +51,7 @@ import java.util.List; import static java.util.Collections.emptyMap; import static java.util.Collections.min; import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; public class MetaDataCreateIndexServiceTests extends ESTestCase { @@ -243,7 +244,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { .put("index.version.created", version) .put("index.version.upgraded", upgraded) .put("index.version.minimum_compatible", minCompat.luceneVersion.toString()) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword") + .put("index.analysis.analyzer.default.tokenizer", "keyword")
.build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, @@ -257,17 +258,60 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { routingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - Settings.Builder builder = Settings.builder(); - builder.put("index.number_of_shards", 1); - MetaDataCreateIndexService.prepareResizeIndexSettings(clusterState, Collections.emptySet(), builder, - clusterState.metaData().index(indexName).getIndex(), "target", ResizeType.SHRINK); - assertEquals("similarity settings must be copied", "BM25", builder.build().get("index.similarity.default.type")); - assertEquals("analysis settings must be copied", - "keyword", builder.build().get("index.analysis.analyzer.my_analyzer.tokenizer")); - assertEquals("node1", builder.build().get("index.routing.allocation.initial_recovery._id")); - assertEquals("1", builder.build().get("index.allocation.max_retries")); - assertEquals(version, builder.build().getAsVersion("index.version.created", null)); - assertEquals(upgraded, builder.build().getAsVersion("index.version.upgraded", null)); + { + final Settings.Builder builder = Settings.builder(); + builder.put("index.number_of_shards", 1); + MetaDataCreateIndexService.prepareResizeIndexSettings( + clusterState, + Collections.emptySet(), + builder, + clusterState.metaData().index(indexName).getIndex(), + "target", + ResizeType.SHRINK); + final Settings settings = builder.build(); + assertThat("similarity settings must be copied", settings.get("index.similarity.default.type"), equalTo("BM25")); + assertThat( + "analysis settings must be copied", settings.get("index.analysis.analyzer.default.tokenizer"), equalTo("keyword")); + assertThat(settings.get("index.routing.allocation.initial_recovery._id"), equalTo("node1")); + assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); + assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); + assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); + } + + // analysis settings from the request are not overwritten + { + final Settings.Builder builder = Settings.builder(); + builder.put("index.number_of_shards", 1); + builder.put("index.analysis.analyzer.default.tokenizer", "whitespace"); + MetaDataCreateIndexService.prepareResizeIndexSettings( + clusterState, + Collections.emptySet(), + builder, + clusterState.metaData().index(indexName).getIndex(), + "target", + ResizeType.SHRINK); + final Settings settings = builder.build(); + assertThat( + "analysis settings are not overwritten", + settings.get("index.analysis.analyzer.default.tokenizer"), + equalTo("whitespace")); + } + + // similarity settings from the request are not overwritten + { + final Settings.Builder builder = Settings.builder(); + builder.put("index.number_of_shards", 1); + builder.put("index.similarity.default.type", "DFR"); + MetaDataCreateIndexService.prepareResizeIndexSettings( + clusterState, + Collections.emptySet(), + builder, + clusterState.metaData().index(indexName).getIndex(), + "target", + ResizeType.SHRINK); + final Settings settings = builder.build(); + assertThat("similarity settings are not overwritten", settings.get("index.similarity.default.type"), equalTo("DFR")); + } } private DiscoveryNode newNode(String nodeId) { From 
5e4d0b45106c4b893e952e2b476049dc294a931d Mon Sep 17 00:00:00 2001 From: David Turner  Date: Mon, 30 Apr 2018 13:09:03 +0100 Subject: [PATCH 23/68] Minor tweaks to reroute documentation (#30246) Add yet another warning about data loss to the introductory paragraph about the unsafe commands. Also move this paragraph next to the details of the unsafe commands, below the section on the `retry_failed` flag. Be more specific about how to use the URI parameters and in-body flags. Clarify statements about when rebalancing takes place (i.e. it respects settings) Resolves #16113. --- docs/reference/cluster/reroute.asciidoc | 135 ++++++++++++------------ 1 file changed, 70 insertions(+), 65 deletions(-) diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index 0bc8610e0c7..f076a7b8358 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -1,13 +1,12 @@ [[cluster-reroute]] == Cluster Reroute -The reroute command allows to explicitly execute a cluster reroute -allocation command including specific commands. For example, a shard can -be moved from one node to another explicitly, an allocation can be -canceled, or an unassigned shard can be explicitly allocated on a -specific node. +The reroute command allows for manual changes to the allocation of individual +shards in the cluster. For example, a shard can be moved from one node to +another explicitly, an allocation can be cancelled, and an unassigned shard can +be explicitly allocated to a specific node. -Here is a short example of how a simple reroute API call: +Here is a short example of a simple reroute API call: [source,js] -------------------------------------------------- @@ -32,59 +31,53 @@ POST /_cluster/reroute // CONSOLE // TEST[skip:doc tests run with only a single node] -An important aspect to remember is the fact that once when an allocation -occurs, the cluster will aim at re-balancing its state back to an even -state. For example, if the allocation includes moving a shard from -`node1` to `node2`, in an `even` state, then another shard will be moved -from `node2` to `node1` to even things out. +It is important to note that after processing any reroute commands +Elasticsearch will perform rebalancing as normal (respecting the values of +settings such as `cluster.routing.rebalance.enable`) in order to remain in a +balanced state. For example, if the requested allocation includes moving a +shard from `node1` to `node2` then this may cause a shard to be moved from +`node2` back to `node1` to even things out. -The cluster can be set to disable allocations, which means that only the -explicitly allocations will be performed. Obviously, only once all -commands has been applied, the cluster will aim to be re-balance its -state. +The cluster can be set to disable allocations using the +`cluster.routing.allocation.enable` setting. If allocations are disabled then +the only allocations that will be performed are explicit ones given using the +`reroute` command, and consequent allocations due to rebalancing. -Another option is to run the commands in `dry_run` (as a URI flag, or in -the request body). This will cause the commands to apply to the current -cluster state, and return the resulting cluster after the commands (and -re-balancing) has been applied. +It is possible to run `reroute` commands in "dry run" mode by using the +`?dry_run` URI query parameter, or by passing `"dry_run": true` in the request +body.
This will calculate the result of applying the commands to the current +cluster state, and return the resulting cluster state after the commands (and +re-balancing) has been applied, but will not actually perform the requested +changes. -If the `explain` parameter is specified, a detailed explanation of why the -commands could or could not be executed is returned. +If the `?explain` URI query parameter is included then a detailed explanation +of why the commands could or could not be executed is included in the response. The commands supported are: `move`:: Move a started shard from one node to another node. Accepts `index` and `shard` for index name and shard number, `from_node` for the - node to move the shard `from`, and `to_node` for the node to move the + node to move the shard from, and `to_node` for the node to move the shard to. `cancel`:: - Cancel allocation of a shard (or recovery). Accepts `index` - and `shard` for index name and shard number, and `node` for the node to - cancel the shard allocation on. It also accepts `allow_primary` flag to - explicitly specify that it is allowed to cancel allocation for a primary - shard. This can be used to force resynchronization of existing replicas - from the primary shard by cancelling them and allowing them to be - reinitialized through the standard reallocation process. + Cancel allocation of a shard (or recovery). Accepts `index` and `shard` for + index name and shard number, and `node` for the node to cancel the shard + allocation on. This can be used to force resynchronization of existing + replicas from the primary shard by cancelling them and allowing them to be + reinitialized through the standard recovery process. By default only + replica shard allocations can be cancelled. If it is necessary to cancel + the allocation of a primary shard then the `allow_primary` flag must also + be included in the request. `allocate_replica`:: - Allocate an unassigned replica shard to a node. Accepts the - `index` and `shard` for index name and shard number, and `node` to - allocate the shard to. Takes <> into account. - -Two more commands are available that allow the allocation of a primary shard -to a node. These commands should however be used with extreme care, as primary -shard allocation is usually fully automatically handled by Elasticsearch. -Reasons why a primary shard cannot be automatically allocated include the following: - -- A new index was created but there is no node which satisfies the allocation deciders. -- An up-to-date shard copy of the data cannot be found on the current data nodes in -the cluster. To prevent data loss, the system does not automatically promote a stale -shard copy to primary. + Allocate an unassigned replica shard to a node. Accepts `index` and `shard` + for index name and shard number, and `node` to allocate the shard to. Takes + <> into account. [float] -=== Retry failed shards +=== Retrying failed allocations The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving @@ -93,36 +86,48 @@ structural problems such as having an analyzer which refers to a stopwords file which doesn't exist on all nodes. Once the problem has been corrected, allocation can be manually retried by -calling the <> API with `?retry_failed`, which -will attempt a single retry round for these shards. +calling the <> API with the `?retry_failed` URI +query parameter, which will attempt a single retry round for these shards. 
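A minimal sketch of that retry call, in the YAML REST test format used elsewhere in this series (whether the retry round succeeds of course depends on the underlying problem having been corrected first):

```
- do:
    cluster.reroute:
      retry_failed: true
```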
[float] === Forced allocation on unrecoverable errors +Two more commands are available that allow the allocation of a primary shard to +a node. These commands should however be used with extreme care, as primary +shard allocation is usually fully automatically handled by Elasticsearch. +Reasons why a primary shard cannot be automatically allocated include the +following: + +- A new index was created but there is no node which satisfies the allocation + deciders. +- An up-to-date shard copy of the data cannot be found on the current data + nodes in the cluster. To prevent data loss, the system does not automatically +promote a stale shard copy to primary. + The following two commands are dangerous and may result in data loss. They are -meant to be used in cases where the original data can not be recovered and the cluster -administrator accepts the loss. If you have suffered a temporary issue that has been -fixed, please see the `retry_failed` flag described above. +meant to be used in cases where the original data can not be recovered and the +cluster administrator accepts the loss. If you have suffered a temporary issue +that can be fixed, please see the `retry_failed` flag described above. To +emphasise: if these commands are performed and then a node joins the cluster +that holds a copy of the affected shard then the copy on the newly-joined node +will be deleted or overwritten. `allocate_stale_primary`:: Allocate a primary shard to a node that holds a stale copy. Accepts the - `index` and `shard` for index name and shard number, and `node` to - allocate the shard to. Using this command may lead to data loss - for the provided shard id. If a node which has the good copy of the - data rejoins the cluster later on, that data will be overwritten with - the data of the stale copy that was forcefully allocated with this - command. To ensure that these implications are well-understood, - this command requires the special field `accept_data_loss` to be - explicitly set to `true` for it to work. + `index` and `shard` for index name and shard number, and `node` to allocate + the shard to. Using this command may lead to data loss for the provided + shard id. If a node which has the good copy of the data rejoins the cluster + later on, that data will be deleted or overwritten with the data of the + stale copy that was forcefully allocated with this command. To ensure that + these implications are well-understood, this command requires the flag + `accept_data_loss` to be explicitly set to `true`. `allocate_empty_primary`:: - Allocate an empty primary shard to a node. Accepts the - `index` and `shard` for index name and shard number, and `node` to - allocate the shard to. Using this command leads to a complete loss - of all data that was indexed into this shard, if it was previously - started. If a node which has a copy of the - data rejoins the cluster later on, that data will be deleted! - To ensure that these implications are well-understood, - this command requires the special field `accept_data_loss` to be - explicitly set to `true` for it to work. + Allocate an empty primary shard to a node. Accepts the `index` and `shard` + for index name and shard number, and `node` to allocate the shard to. Using + this command leads to a complete loss of all data that was indexed into + this shard, if it was previously started. If a node which has a copy of the + data rejoins the cluster later on, that data will be deleted. 
To ensure + that these implications are well-understood, this command requires the flag + `accept_data_loss` to be explicitly set to `true`. From 50945051b69f3b02a85111593f3d9b9d46ef4b3e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 30 Apr 2018 09:39:50 -0400 Subject: [PATCH 24/68] HTML5ify Javadoc for core and test framework (#30234) `javadoc` will switch from detaulting to html4 to html5 in "a future release". We should get ahead of it so we're not surprised. Also, HTML5 is the future! Er, the present. Anyway, this follows up from #30220 to make the Javadoc for two of the four remaining projects HTML5 compatible. --- .../elasticsearch/gradle/BuildPlugin.groovy | 2 -- .../org/elasticsearch/common/Booleans.java | 12 ++++---- .../core/internal/io/IOUtils.java | 8 ++--- .../elasticsearch/test/ESIntegTestCase.java | 30 +++++++++---------- .../test/InternalTestCluster.java | 2 +- .../elasticsearch/test/XContentTestUtils.java | 2 +- .../test/engine/MockEngineSupport.java | 11 +++---- .../test/transport/MockTransportService.java | 4 +-- 8 files changed, 34 insertions(+), 37 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index d59d25ddc55..4b709508662 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -561,8 +561,6 @@ class BuildPlugin implements Plugin { */ List html4Projects = [ ':server', - ':libs:elasticsearch-core', - ':test:framework', ':x-pack:plugin:core', ] if (false == html4Projects.contains(project.path)) { diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java index 7447f0111f7..86877857965 100644 --- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java @@ -107,7 +107,7 @@ public final class Booleans { } /** - * Returns false if text is in false, 0, off, no; else, true + * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. * * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead. */ @@ -119,9 +119,7 @@ public final class Booleans { return parseBooleanLenient(value, false); } /** - * Returns true iff the value is neither of the following: - * false, 0, off, no - * otherwise false + * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. * * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead. */ @@ -134,21 +132,21 @@ public final class Booleans { } /** - * @return true iff the value is false, otherwise false. + * @return {@code true} iff the value is "false", otherwise {@code false}. */ public static boolean isFalse(String value) { return "false".equals(value); } /** - * @return true iff the value is true, otherwise false + * @return {@code true} iff the value is "true", otherwise {@code false}. */ public static boolean isTrue(String value) { return "true".equals(value); } /** - * Returns false if text is in false, 0, off, no; else, true + * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. * * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. 
Use {@link #parseBoolean(char[], int, int, boolean)} instead */ diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java index 4108992fb1f..67663516167 100644 --- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java @@ -37,11 +37,11 @@ import java.util.Map; public final class IOUtils { private IOUtils() { - + // Static utils methods } /** - * Closes all given Closeables. Some of the Closeables may be null; they are + * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are * ignored. After everything is closed, the method either throws the first exception it hit * while closing with other exceptions added as suppressed, or completes normally if there were * no exceptions. @@ -53,7 +53,7 @@ public final class IOUtils { } /** - * Closes all given Closeables. Some of the Closeables may be null; they are + * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are * ignored. After everything is closed, the method adds any exceptions as suppressed to the * original exception, or throws the first exception it hit if {@code Exception} is null. If * no exceptions are encountered and the passed in exception is null, it completes normally. @@ -65,7 +65,7 @@ public final class IOUtils { } /** - * Closes all given Closeables. Some of the Closeables may be null; they are + * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are * ignored. After everything is closed, the method either throws the first exception it hit * while closing with other exceptions added as suppressed, or completes normally if there were * no exceptions. diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 2d027e8bfec..a7fd6768064 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1352,7 +1352,7 @@ public abstract class ESIntegTestCase extends ESTestCase { * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * - * @param forceRefresh if true all involved indices are refreshed once the documents are indexed. Additionally if true + * @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed. Additionally if {@code true} * some empty dummy documents are may be randomly inserted into the document list and deleted once all documents are indexed. * This is useful to produce deleted documents on the server side. * @param builders the documents to index. @@ -1369,8 +1369,8 @@ public abstract class ESIntegTestCase extends ESTestCase { * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * - * @param forceRefresh if true all involved indices are refreshed once the documents are indexed. - * @param dummyDocuments if true some empty dummy documents may be randomly inserted into the document list and deleted once + * @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed. 
+ * @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once * all documents are indexed. This is useful to produce deleted documents on the server side. * @param builders the documents to index. */ @@ -1385,10 +1385,10 @@ public abstract class ESIntegTestCase extends ESTestCase { * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * - * @param forceRefresh if true all involved indices are refreshed once the documents are indexed. - * @param dummyDocuments if true some empty dummy documents may be randomly inserted into the document list and deleted once + * @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed. + * @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once * all documents are indexed. This is useful to produce deleted documents on the server side. - * @param maybeFlush if true this method may randomly execute full flushes after index operations. + * @param maybeFlush if {@code true} this method may randomly execute full flushes after index operations. * @param builders the documents to index. */ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List builders) throws InterruptedException, ExecutionException { @@ -1554,27 +1554,27 @@ public abstract class ESIntegTestCase extends ESTestCase { Scope scope() default Scope.SUITE; /** - * Returns the number of nodes in the cluster. Default is -1 which means + * Returns the number of nodes in the cluster. Default is {@code -1} which means * a random number of nodes is used, where the minimum and maximum number of nodes * are either the specified ones or the default ones if not specified. */ int numDataNodes() default -1; /** - * Returns the minimum number of data nodes in the cluster. Default is -1. + * Returns the minimum number of data nodes in the cluster. Default is {@code -1}. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ int minNumDataNodes() default -1; /** - * Returns the maximum number of data nodes in the cluster. Default is -1. + * Returns the maximum number of data nodes in the cluster. Default is {@code -1}. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ int maxNumDataNodes() default -1; /** - * Indicates whether the cluster can have dedicated master nodes. If false means data nodes will serve as master nodes - * and there will be no dedicated master (and data) nodes. Default is true which means + * Indicates whether the cluster can have dedicated master nodes. If {@code false} means data nodes will serve as master nodes + * and there will be no dedicated master (and data) nodes. Default is {@code false} which means * dedicated master nodes will be randomly used. */ boolean supportsDedicatedMasters() default true; @@ -1703,7 +1703,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } /** - * This method is used to obtain settings for the Nth node in the cluster. + * This method is used to obtain settings for the {@code N}th node in the cluster. * Nodes in this cluster are associated with an ordinal number such that nodes can * be started with specific configurations. This method might be called multiple * times with the same ordinal and is expected to return the same value for each invocation. 
@@ -1878,7 +1878,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Iff this returns true mock transport implementations are used for the test runs. Otherwise not mock transport impls are used. - * The default is true + * The default is {@code true}. */ protected boolean addMockTransportService() { return true; @@ -1886,7 +1886,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Iff this returns true test zen discovery implementations is used for the test runs. - * The default is true + * The default is {@code true}. */ protected boolean addTestZenDiscovery() { return true; @@ -1957,7 +1957,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Returns the transport client ratio from the class level annotation or via * {@link System#getProperty(String)} if available. If both are not available this will - * return a random ratio in the interval [0..1] + * return a random ratio in the interval {@code [0..1]}. */ protected double getPerTestTransportClientRatio() { final ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 12acd21903e..5099fc0540d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1978,7 +1978,7 @@ public final class InternalTestCluster extends TestCluster { } /** - * Executed for each node before the n+1 node is restarted. The given client is + * Executed for each node before the {@code n + 1} node is restarted. The given client is * an active client to the node that will be restarted next. */ public void doAfterNodes(int n, Client client) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java index 15c650173bf..724a99f2c94 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java @@ -145,7 +145,7 @@ public final class XContentTestUtils { * If the xContent output contains objects that should be skipped of such treatment, an optional filtering * {@link Predicate} can be supplied that checks xContent paths that should be excluded from this treatment. * - * This predicate should check the xContent path that we want to insert to and return true if the + * This predicate should check the xContent path that we want to insert to and return {@code true} if the * path should be excluded. Paths are string concatenating field names and array indices, so e.g. in: * *
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
index bf0b7376b81..fc2a85b35a9 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
@@ -19,14 +19,13 @@
 package org.elasticsearch.test.engine;
 
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.AssertingDirectoryReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FilterDirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.AssertingIndexSearcher;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryCache;
 import org.apache.lucene.search.QueryCachingPolicy;
-import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.logging.Loggers;
@@ -38,6 +37,7 @@ import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.engine.MockInternalEngine;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -47,14 +47,15 @@ import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
- * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine}
+ * Support class to build MockEngines like {@link MockInternalEngine}
  * since they need to subclass the actual engine
  */
 public final class MockEngineSupport {
 
     /**
-     * Allows tests to wrap an index reader randomly with a given ratio. This is disabled by default ie. 0.0d since reader wrapping is insanely
-     * slow if {@link org.apache.lucene.index.AssertingDirectoryReader} is used.
+     * Allows tests to wrap an index reader randomly with a given ratio. This
+     * is disabled by default ie. {@code 0.0d} since reader wrapping is insanely
+     * slow if {@link AssertingDirectoryReader} is used.
      */
     public static final Setting<Double> WRAP_READER_RATIO =
         Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, Property.IndexScope);
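As a hedged sketch only: this setting is registered by the test framework's mock engine, so it is meaningful only for indices created in tests that run with that engine; the index name and ratio below are illustrative.

```
# sketch: enable random reader wrapping for a test index (test framework only)
- do:
    indices.create:
      index: test
      body:
        settings:
          index.engine.mock.random.wrap_reader_ratio: 0.5
```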
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
index 1c31533c933..6654444066d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
@@ -474,7 +474,7 @@ public final class MockTransportService extends TransportService {
     /**
      * Adds a new delegate transport that is used for communication with the given transport service.
      *
-     * @return true iff no other delegate was registered for any of the addresses bound by transport service.
+     * @return {@code true} iff no other delegate was registered for any of the addresses bound by transport service.
      */
     public boolean addDelegate(TransportService transportService, DelegateTransport transport) {
         boolean noRegistered = true;
@@ -487,7 +487,7 @@ public final class MockTransportService extends TransportService {
     /**
      * Adds a new delegate transport that is used for communication with the given transport address.
      *
-     * @return true iff no other delegate was registered for this address before.
+     * @return {@code true} iff no other delegate was registered for this address before.
      */
     public boolean addDelegate(TransportAddress transportAddress, DelegateTransport transport) {
         return transport().transports.put(transportAddress, transport) == null;

From 725a5af2c6743cdb744539cc0cbfb8ec51bc848b Mon Sep 17 00:00:00 2001
From: Chris Earle 
Date: Mon, 30 Apr 2018 10:16:11 -0400
Subject: [PATCH 25/68] _cluster/state should always return cluster_uuid
 (#30143)

Currently, the only way to get the REST response for the `/_cluster/state`
call to return the `cluster_uuid` is to request the `metadata` metric,
which is one of the most expensive response structures. However, external
monitoring agents will likely want the `cluster_uuid` to correlate the
response with other API responses whether or not they want cluster
metadata.
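To illustrate the intent, a sketch in the YAML REST test format used elsewhere in this series: after this change, even a cheap metric filter still returns the UUID (the assertions below mirror the tests added by this patch).

```
- do:
    cluster.state:
      metric: [ master_node ]

- is_true: cluster_uuid
- is_true: master_node
```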
---
 docs/reference/cluster/state.asciidoc           |  6 ++++++
 .../test/cluster.state/10_basic.yml             | 17 ++++++++++++++++-
 .../test/cluster.state/20_filtering.yml         | 16 ++++++++++++++++
 .../org/elasticsearch/cluster/ClusterState.java |  3 +++
 .../reroute/ClusterRerouteResponseTests.java    |  3 +++
 .../cluster/ClusterStatsMonitoringDocTests.java |  4 ++++
 6 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc
index d0ff3290c74..a20ff04d83f 100644
--- a/docs/reference/cluster/state.asciidoc
+++ b/docs/reference/cluster/state.asciidoc
@@ -15,6 +15,12 @@ of the cluster state (its size when serialized for transmission over
 the network), and the cluster state itself, which can be filtered to
 only retrieve the parts of interest, as described below.
 
+The cluster's `cluster_uuid` is also returned as part of the top-level
+response, in addition to the `metadata` section. added[6.4.0]
+
+NOTE: While the cluster is still forming, it is possible for the `cluster_uuid`
+      to be `_na_` and for the cluster state's version to be `-1`.
+
 By default, the cluster state request is routed to the master node, to
 ensure that the latest cluster state is returned.   
 For debugging purposes, you can retrieve the cluster state local to a
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml
index e3af21412ca..c38a33fdff1 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml
@@ -2,7 +2,7 @@
 "get cluster state":
   - do:
       cluster.state: {}
-  
+
   - is_true: master_node
 
 ---
@@ -18,3 +18,18 @@
   - is_true: master_node
   - gte: { compressed_size_in_bytes: 50 }
   - is_true: compressed_size
+
+---
+"get cluster state returns cluster_uuid at the top level":
+  - skip:
+      version:  " - 6.99.99"
+      reason:   "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"
+
+  - do:
+      cluster.state:
+        human: true
+
+  - is_true: cluster_uuid
+  - is_true: master_node
+  - gte: { compressed_size_in_bytes: 50 }
+  - is_true: compressed_size
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml
index 1e1d5712560..f9e5a0c03df 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml
@@ -156,3 +156,19 @@ setup:
   - is_true: routing_table.indices.index1
   - is_true: metadata.indices.index2
   - is_true: routing_table.indices.index2
+
+---
+"Filtering the cluster state returns cluster_uuid at the top level regardless of metric filters":
+  - skip:
+      version:  " - 6.99.99"
+      reason:   "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"
+
+  - do:
+      cluster.state:
+        metric: [ master_node, version, metadata ]
+
+  - is_true: cluster_uuid
+  - is_true: master_node
+  - is_true: version
+  - is_true: state_uuid
+  - is_true: metadata
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
index 30c8df07ec1..2b991d1dc61 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -326,6 +326,9 @@ public class ClusterState implements ToXContentFragment, Diffable<ClusterState> {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         EnumSet<Metric> metrics = Metric.parseString(params.param("metric", "_all"), true);
 
+        // always provide the cluster_uuid as part of the top-level response (also part of the metadata response)
+        builder.field("cluster_uuid", metaData().clusterUUID());
+
         if (metrics.contains(Metric.VERSION)) {
             builder.field("version", version);
             builder.field("state_uuid", stateUUID);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
index 4ced505717a..7d671096514 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
@@ -69,6 +69,7 @@ public class ClusterRerouteResponseTests extends ESTestCase {
             assertEquals("{\n" +
                     "  \"acknowledged\" : true,\n" +
                     "  \"state\" : {\n" +
+                    "    \"cluster_uuid\" : \"_na_\",\n" +
                     "    \"version\" : 0,\n" +
                     "    \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" +
                     "    \"master_node\" : \"node0\",\n" +
@@ -136,6 +137,7 @@ public class ClusterRerouteResponseTests extends ESTestCase {
             assertEquals("{\n" +
                     "  \"acknowledged\" : true,\n" +
                     "  \"state\" : {\n" +
+                    "    \"cluster_uuid\" : \"_na_\",\n" +
                     "    \"version\" : 0,\n" +
                     "    \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" +
                     "    \"master_node\" : \"node0\"\n" +
@@ -168,6 +170,7 @@ public class ClusterRerouteResponseTests extends ESTestCase {
             assertEquals("{\n" +
                     "  \"acknowledged\" : true,\n" +
                     "  \"state\" : {\n" +
+                    "    \"cluster_uuid\" : \"_na_\",\n" +
                     "    \"metadata\" : {\n" +
                     "      \"cluster_uuid\" : \"_na_\",\n" +
                     "      \"templates\" : { },\n" +
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
index 4a096f0ca4a..098f4190b0e 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
@@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -188,6 +189,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
Date: Mon, 30 Apr 2018 08:04:15 -0700
Subject: [PATCH 26/68] [DOCS] Removes redundant LDAP realm settings (#30193)

---
 .../authentication/ldap-realm.asciidoc        | 202 +-----------------
 .../en/settings/security-settings.asciidoc    | 167 ++++++++++-----
 2 files changed, 111 insertions(+), 258 deletions(-)
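For orientation, a hedged sketch of the kind of `elasticsearch.yml` realm configuration these consolidated settings describe; the realm name `ldap1` and the server URLs are illustrative and not taken from the patch.

```
# sketch: an LDAP realm with two servers and failover load balancing
xpack.security.authc.realms.ldap1:
  type: ldap
  order: 0
  url: [ "ldaps://ldap1.example.com:636", "ldaps://ldap2.example.com:636" ]
  load_balance.type: failover
```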

diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
index bd32c496228..15b014183aa 100644
--- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
@@ -137,211 +137,13 @@ The `load_balance.type` setting can be used at the realm level to configure how
 {security} should interact with multiple LDAP servers. {security} supports both
 failover and load balancing modes of operation.
 
-.Load Balancing and Failover Types
-|=======================
-| Type              | | | Description
-| `failover`        | | | The URLs specified are used in the order that they are specified.
-                          The first server that can be connected to will be used for all
-                          subsequent connections. If a connection to that server fails then
-                          the next server that a connection can be established to will be
-                          used for subsequent connections.
-| `dns_failover`    | | | In this mode of operation, only a single URL may be specified.
-                          This URL must contain a DNS name. The system will be queried for
-                          all IP addresses that correspond to this DNS name. Connections to
-                          the LDAP server will always be tried in the order in which they
-                          were retrieved. This differs from `failover` in that there is no
-                          reordering of the list and if a server has failed at the beginning
-                          of the list, it will still be tried for each subsequent connection.
-| `round_robin`     | | | Connections will continuously iterate through the list of provided
-                          URLs. If a server is unavailable, iterating through the list of
-                          URLs will continue until a successful connection is made.
-| `dns_round_robin` | | | In this mode of operation, only a single URL may be specified. This
-                          URL must contain a DNS name. The system will be queried for all IP
-                          addresses that correspond to this DNS name. Connections will
-                          continuously iterate through the list of addresses. If a server is
-                          unavailable, iterating through the list of URLs will continue until
-                          a successful connection is made.
-|=======================
+See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings].
 
 
 [[ldap-settings]]
 ===== LDAP Realm Settings
 
-.Common LDAP Realm Settings
-[cols="4,^3,10"]
-|=======================
-| Setting                        | Required | Description
-| `type`                         | yes      | Indicates the realm type. Must be set to `ldap`.
-| `order`                        | no       | Indicates the priority of this realm within the realm
-                                              chain. Realms with a lower order are consulted first.
-                                              Although not required, we recommend explicitly
-                                              setting this value when you configure multiple realms.
-                                              Defaults to `Integer.MAX_VALUE`.
-| `enabled`                      | no       | Indicates whether this realm is enabled or disabled.
-                                              Enables you to disable a realm without removing its
-                                              configuration. Defaults to `true`.
-| `url`                          | yes      | Specifies one or more LDAP URLs of the form of
-                                              `ldap[s]://<server>:<port>`. Multiple URLs can be
-                                              defined using a comma separated value or array syntax:
-                                              `[ "ldaps://server1:636", "ldaps://server2:636" ]`.
-                                              `ldaps` and `ldap` URL protocols cannot be mixed in
-                                              the same realm.
-| `load_balance.type`            | no       | The behavior to use when there are multiple LDAP URLs
-                                              defined. For supported values see
-                                              <<ldap-load-balancing>>.
-| `load_balance.cache_ttl`       | no       | When using `dns_failover` or `dns_round_robin` as the
-                                              load balancing type, this setting controls the amount of time
-                                              to cache DNS lookups. Defaults to `1h`.
-| `user_group_attribute`         | no       | Specifies the attribute to examine on the user for group
-                                              membership. The default is `memberOf`. This setting will
-                                              be ignored if any `group_search` settings are specified.
-| `group_search.base_dn`         | no       | Specifies a container DN to search for groups in which
-                                              the user has membership. When this element is absent,
-                                              Security searches for the attribute specified by
-                                              `user_group_attribute` set on the user to determine
-                                              group membership.
-| `group_search.scope`           | no       | Specifies whether the group search should be
-                                              `sub_tree`, `one_level` or `base`.  `one_level` only
-                                              searches objects directly contained within the
-                                              `base_dn`. The default `sub_tree` searches all objects
-                                              contained under `base_dn`. `base` specifies that the
-                                              `base_dn` is a group object, and that it is the only
-                                              group considered.
-| `group_search.filter`          | no       | Specifies a filter to use to lookup a group. If not
-                                              set, the realm searches for `group`,
-                                              `groupOfNames`, `groupOfUniqueNames`, or `posixGroup` with the
-                                              attributes `member`, `memberOf`, or `memberUid`. Any instance of
-                                              `{0}` in the filter is replaced by the user
-                                              attribute defined in `group_search.user_attribute`
-| `group_search.user_attribute`  | no       | Specifies the user attribute that is fetched and
-                                              provided as a parameter to the filter.  If not set,
-                                              the user DN is passed to the filter.
-| `unmapped_groups_as_roles`     | no       | Specifies whether the names of any unmapped LDAP groups
-                                              should be used as role names and assigned to the user.
-                                              A group is considered to be _unmapped_ if it is not referenced
-                                              in any <> (API based
-                                              role-mappings are not considered).
-                                              Defaults to `false`.
-| `timeout.tcp_connect`          | no       | Specifies the TCP connect timeout period for establishing an
-                                              LDAP connection. An `s` at the end indicates seconds, or `ms`
-                                              indicates milliseconds. Defaults to `5s` (5 seconds).
-| `timeout.tcp_read`             | no       | Specifies the TCP read timeout period after establishing an LDAP connection.
-                                              An `s` at the end indicates seconds, or `ms` indicates milliseconds.
-                                              Defaults to `5s` (5 seconds).
-| `timeout.ldap_search`          | no       | Specifies the LDAP Server enforced timeout period for an LDAP search.
-                                              An `s` at the end indicates seconds, or `ms` indicates milliseconds.
-                                              Defaults to `5s` (5 seconds).
-| `files.role_mapping`           | no       | Specifies the path and file name for the
-                                              <>.
-                                              Defaults to `ES_HOME/config/x-pack/role_mapping.yml`.
-| `follow_referrals`             | no       | Specifies whether {security} should follow referrals
-                                              returned by the LDAP server. Referrals are URLs returned by
-                                              the server that are to be used to continue the LDAP operation
-                                              (e.g. search). Defaults to `true`.
-| `metadata`                     | no       | Specifies the list of additional LDAP attributes that should
-                                              be stored in the `metadata` of an authenticated user.
-| `ssl.key`                      | no       | Specifies the path to the PEM encoded private key to use if the LDAP
-                                              server requires client authentication. `ssl.key` and `ssl.keystore.path`
-                                              may not be used at the same time.
-| `ssl.key_passphrase`           | no       | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted.
-| `ssl.certificate`              | no       | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the
-                                              key if the LDAP server requires client authentication.
-| `ssl.certificate_authorities`  | no       | Specifies the paths to the PEM encoded certificate authority certificates that
-                                              should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used
-                                              at the same time.
-| `ssl.keystore.path`            | no       | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and
-                                              `ssl.keystore.path` may not be used at the same time.
-| `ssl.keystore.password`        | no       | The password to the keystore.
-| `ssl.keystore.key_password`    | no       | The password for the key in the keystore. Defaults to the keystore password.
-| `ssl.truststore.path`          | no       | The path to the Java Keystore file that contains the certificates to trust.
-                                              `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time.
-| `ssl.truststore.password`      | no       | The password to the truststore.
-| `ssl.verification_mode`        | no       | Specifies the type of verification to be performed when
-                                              connecting to a LDAP server using `ldaps`. When
-                                              set to `full`, the hostname or IP address used in the `url`
-                                              must match one of the names in the certificate or the
-                                              connection will not be allowed. Due to their potential security impact,
-                                              `ssl` settings are not exposed via the
-                                              {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-                                              Values are `none`, `certificate`, and `full`. Defaults to `full`.
-                                              See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
-                                              for an explanation of these values.
-| `ssl.supported_protocols`      | no       | Specifies the supported protocols for SSL/TLS.
-| `ssl.cipher_suites`            | no       | Specifies the cipher suites that should be supported when communicating
-                                              with the LDAP server.
-| `cache.ttl`                | no           | Specifies the time-to-live for cached user entries. A
-                                              user's credentials are cached for this period of time.
-                                              Specify the time period using the standard Elasticsearch
-                                              {ref}/common-options.html#time-units[time units].
-                                              Defaults to `20m`.
-| `cache.max_users`          | no           | Specifies the maximum number of user entries that can be
-                                              stored in the cache at one time. Defaults to 100,000.
-| `cache.hash_algo`          | no           | Specifies the hashing algorithm that is used for the
-                                              cached user credentials. See
-                                              <> for the possible
-                                              values. (Expert Setting).
-|=======================
-
-.User Search Mode Settings
-|=======================
-| Setting                                  | Required | Description
-| `bind_dn`                                | no       | The DN of the user that is used to bind to the LDAP
-                                                        and perform searches. If not specified, an anonymous
-                                                        bind is attempted. Due to its potential security
-                                                        impact, `bind_dn` is not exposed via the
-                                                        {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-| `bind_password`                          | no       | The password for the user that is used to bind to the
-                                                        LDAP directory. Due to its potential security impact,
-                                                        `bind_password` is not exposed via the
-                                                        {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-                                                        *Deprecated.* Use `secure_bind_password` instead. 
-| `secure_bind_password`                   | no       | ({ref}/secure-settings.html[Secure])
-                                                        The password for the user that is used to bind to LDAP directory.
-| `user_search.base_dn`                    | yes      | Specifies a container DN to search for users.
-| `user_search.scope`                      | no       | The scope of the user search. Valid values are `sub_tree`,
-                                                        `one_level` or `base`. `one_level` only searches objects
-                                                        directly contained within the `base_dn`. `sub_tree` searches
-                                                        all objects contained under `base_dn`. `base` specifies
-                                                        that the `base_dn` is the user object, and that it is the
-                                                        only user considered. Defaults to `sub_tree`.
-| `user_search.filter`                     | no       | Specifies the filter used to search the directory in attempt to match
-                                                        an entry with the username provided by the user. Defaults to `(uid={0})`.
-                                                        `{0}` is substituted with the username provided when searching.
-| `user_search.attribute`                  | no       | This setting is deprecated; use `user_search.filter` instead.
-                                                        Specifies the attribute to match with the username presented
-                                                        to. Defaults to `uid`.
-| `user_search.pool.enabled`               | no       | Enables or disables connection pooling for user search. When
-                                                        disabled a new connection is created for every search. The
-                                                        default is `true`.
-| `user_search.pool.size`                  | no       | Specifies the maximum number of connections to the LDAP
-                                                        server to allow in the connection pool. Defaults to `20`.
-| `user_search.pool.initial_size`          | no       | The initial number of connections to create to the LDAP
-                                                        server on startup. Defaults to `0`. Values greater than `0`
-                                                        could cause startup failures if the LDAP server is down.
-| `user_search.pool.health_check.enabled`  | no       | Enables or disables a health check on LDAP connections in
-                                                        the connection pool. Connections are checked in the
-                                                        background at the specified interval. Defaults to `true`.
-| `user_search.pool.health_check.dn`       | no/yes   | Specifies the distinguished name to retrieve as part of
-                                                        the health check. Defaults to the value of `bind_dn`.
-                                                        This setting is required when `bind_dn` is not configured.
-| `user_search.pool.health_check.interval` | no       | How often to perform background checks of connections in
-                                                        the pool. Defaults to `60s`.
-|=======================
-
-.User Templates Mode Settings
-[cols="4,^3,10"]
-|=======================
-| Setting               | Required  | Description
-| `user_dn_templates`   | yes       | Specifies the DN template that replaces the
-                                      user name with the string `{0}`. This element
-                                      is multivalued, allowing for multiple user
-                                      contexts.
-|=======================
-
-
-NOTE:   If any settings starting with `user_search` are specified, the
-        `user_dn_templates` the settings are ignored.
-
+See {ref}/security-settings.html#ref-ldap-settings[LDAP Realm Settings].
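For orientation, a minimal `elasticsearch.yml` sketch of an LDAP realm in user
templates mode; the realm name, host, and DN template are hypothetical and not
taken from the patch itself:

```
xpack.security.authc.realms:
  ldap1:                                   # hypothetical realm name
    type: ldap
    order: 0
    url: "ldaps://ldap.example.com:636"    # placeholder host
    user_dn_templates:
      - "cn={0},ou=users,dc=example,dc=com"
```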
 
 [[mapping-roles-ldap]]
 ==== Mapping LDAP Groups to Roles
diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc
index 046d76784fb..97413ed07bb 100644
--- a/x-pack/docs/en/settings/security-settings.asciidoc
+++ b/x-pack/docs/en/settings/security-settings.asciidoc
@@ -150,9 +150,9 @@ For a native realm, the `type` must be set to `native`. In addition to the
 <>, you can specify  
 the following optional settings: 
 
-`cache.ttl`:: The time-to-live for cached user entries. User credentials are 
-cached for this period of time. Specify the time period using the standard 
-{es} <>. Defaults to `20m`.
+`cache.ttl`:: The time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this period of time. Specify the time period using 
+the standard {es} <>. Defaults to `20m`.
 
 `cache.max_users`:: The maximum number of user entries that can live in the 
 cache at any given time. Defaults to 100,000.
@@ -169,9 +169,9 @@ in-memory cached user credentials. For possible values, see
 ===== File realm settings
 
 `cache.ttl`::
-The time-to-live for cached user entries--user credentials are cached for
-this configured period of time. Defaults to `20m`. Specify values using the
-standard Elasticsearch {ref}/common-options.html#time-units[time units].
+The time-to-live for cached user entries. A user and a hash of its credentials 
+are cached for this configured period of time. Specify values 
+using the standard {es} {ref}/common-options.html#time-units[time units].
 Defaults to `20m`.
 
 `cache.max_users`::
@@ -186,12 +186,18 @@ all possible values. Defaults to `ssha256`.
 [[ref-ldap-settings]]
 [float]
 ===== LDAP realm settings
-`url`::
-An LDAP URL in the format `ldap[s]://:`. Required.
+
+The `type` setting must be set to `ldap`. In addition to the 
+<>, you can specify the following settings: 
+
+`url`:: Specifies one or more LDAP URLs in the format  
+`ldap[s]://<server>:<port>`. Multiple URLs can be defined using a comma 
+separated value or array syntax: `[ "ldaps://server1:636", "ldaps://server2:636" ]`. 
+`ldaps` and `ldap` URL protocols cannot be mixed in the same realm. Required.
 
 `load_balance.type`::
 The behavior to use when there are multiple LDAP URLs defined. For supported
-values see {xpack-ref}/ldap-realm.html#ldap-load-balancing[LDAP load balancing and failover types].
+values see <<load-balancing>>.
 Defaults to `failover`.
 
 `load_balance.cache_ttl`::
@@ -200,36 +206,45 @@ this setting controls the amount of time to cache DNS lookups. Defaults
 to `1h`.
 
 `bind_dn`::
-The DN of the user that will be used to bind to the LDAP and perform searches.
-Only applicable in {xpack-ref}/ldap-realm.html#ldap-user-search[user search mode].
-If this is not specified, an anonymous bind will be attempted.
-Defaults to Empty.
+The DN of the user that is used to bind to the LDAP directory and perform searches.
+Only applicable in user search mode.
+If not specified, an anonymous bind is attempted.
+Defaults to Empty. Due to its potential security impact, `bind_dn` is not 
+exposed via the <>.
 
 `bind_password`::
-The password for the user that will be used to bind to the LDAP directory.
-Defaults to Empty.
-*Deprecated.* Use `secure_bind_password` instead.
+deprecated[6.3] Use `secure_bind_password` instead. The password for the user 
+that is used to bind to the LDAP directory.
+Defaults to Empty. Due to its potential security impact, `bind_password` is not 
+exposed via the <>.
+
 
 `secure_bind_password` (<>)::
-The password for the user that will be used to bind to the LDAP directory.
+The password for the user that is used to bind to the LDAP directory.
 Defaults to Empty.
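To show how these bind settings fit together, a hedged sketch of a user search
realm (realm name, host, and DNs are illustrative only):

```
xpack.security.authc.realms:
  ldap1:                                     # hypothetical realm name
    type: ldap
    order: 0
    url: "ldaps://ldap.example.com:636"      # placeholder host
    bind_dn: "cn=searchuser,dc=example,dc=com"
    # The bind password is a secure setting (secure_bind_password); it is
    # stored in the Elasticsearch keystore, not in this file.
    user_search:
      base_dn: "ou=users,dc=example,dc=com"
```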
 
 `user_dn_templates`::
 The DN template that replaces the user name with the string `{0}`.
-This element is multivalued; you can specify multiple user contexts.
-Required to operate in user template mode. Not valid
-if `user_search.base_dn` is specified. For more information on
+This setting is multivalued; you can specify multiple user contexts.
+Required to operate in user template mode. If `user_search.base_dn` is specified, 
+this setting is not valid. For more information on
 the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms].
++
+--
+NOTE: If any settings starting with `user_search` are specified, the 
+`user_dn_templates` setting is ignored.
+
+--
 
 `user_group_attribute`::
 Specifies the attribute to examine on the user for group membership.
-The default is `memberOf`. This setting will be ignored if any
-`group_search` settings are specified. Defaults to  `memberOf`.
+If any `group_search` settings are specified, this setting is ignored. Defaults 
+to `memberOf`.
 
 `user_search.base_dn`::
 Specifies a container DN to search for users. Required
-to operated in user search mode. Not valid if
-`user_dn_templates is specified. For more information on
+to operate in user search mode. If `user_dn_templates` is specified, this 
+setting is not valid. For more information on
 the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms].
 
 `user_search.scope`::
@@ -240,18 +255,18 @@ The scope of the user search. Valid values are `sub_tree`, `one_level` or
 the only user considered. Defaults to  `sub_tree`.
 
 `user_search.filter`::
-Specifies the filter used to search the directory in attempt to match
+Specifies the filter used to search the directory in an attempt to match
 an entry with the username provided by the user. Defaults to `(uid={0})`.
 `{0}` is substituted with the username provided when searching.
 
 `user_search.attribute`::
-This setting is deprecated; use `user_search.filter` instead.
-The attribute to match with the username presented to. Defaults to `uid`.
+deprecated[5.6] Use `user_search.filter` instead.
+The attribute to match with the username sent with the request. Defaults to `uid`.
 
 `user_search.pool.enabled`::
-Enables or disables connection pooling for user search. When
-disabled a new connection is created for every search. The
-default is `true` when `bind_dn` is provided.
+Enables or disables connection pooling for user search. If set to `false`, a new 
+connection is created for every search. The
+default is `true` when `bind_dn` is set.
 
 `user_search.pool.size`::
 The maximum number of connections to the LDAP server to allow in the
@@ -259,17 +274,18 @@ connection pool. Defaults to `20`.
 
 `user_search.pool.initial_size`::
 The initial number of connections to create to the LDAP server on startup.
-Defaults to `0`.
+Defaults to `0`. If the LDAP server is down, values greater than `0` could cause 
+startup failures.
 
 `user_search.pool.health_check.enabled`::
-Flag to enable or disable a health check on LDAP connections in the connection
+Enables or disables a health check on LDAP connections in the connection
 pool. Connections are checked in the background at the specified interval.
 Defaults to `true`.
 
 `user_search.pool.health_check.dn`::
-The distinguished name to be retrieved as part of the health check.
-Defaults to the value of `bind_dn` if present, and if
-not falls back to `user_search.base_dn`.
+The distinguished name that is retrieved as part of the health check.
+Defaults to the value of `bind_dn` if present; if
+not, falls back to `user_search.base_dn`.
 
 `user_search.pool.health_check.interval`::
 The interval to perform background checks of connections in the pool.
@@ -277,7 +293,7 @@ Defaults to `60s`.
 
 `group_search.base_dn`::
 The container DN to search for groups in which the user has membership. When
-this element is absent, Security searches for the attribute specified by
+this element is absent, {security} searches for the attribute specified by
 `user_group_attribute` set on the user in order to determine group membership.
 
 `group_search.scope`::
@@ -287,30 +303,33 @@ Specifies whether the group search should be `sub_tree`, `one_level` or
 `base` specifies that the `base_dn` is a group object, and that it is the
 only group considered. Defaults to  `sub_tree`.
 
-`group_search.filter`::
+`group_search.filter`:: 
+Specifies a filter to use to look up a group. 
 When not set, the realm searches for `group`, `groupOfNames`, `groupOfUniqueNames`,
 or `posixGroup` with the attributes `member`, `memberOf`, or `memberUid`.  Any
 instance of `{0}` in the filter is replaced by the user attribute defined in
 `group_search.user_attribute`.
 
 `group_search.user_attribute`::
-Specifies the user attribute that will be fetched and provided as a parameter to
+Specifies the user attribute that is fetched and provided as a parameter to
 the filter.  If not set, the user DN is passed into the filter. Defaults to Empty.
 
 `unmapped_groups_as_roles`::
-Takes a boolean variable. When this element is set to `true`, the names of any
-LDAP groups that are not referenced in a role-mapping _file_ are used as role
-names and assigned to the user. Defaults to `false`.
+If set to `true`, the names of any unmapped LDAP groups are used as role names 
+and assigned to the user. A group is considered to be _unmapped_ if it is not 
+referenced in a 
+{xpack-ref}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based 
+role mappings are not considered. Defaults to `false`.
 
 `files.role_mapping`::
 The {xpack-ref}/security-files.html[location] for the {xpack-ref}/mapping-roles.html#mapping-roles[
 YAML role mapping configuration file]. Defaults to
-`CONFIG_DIR/x-pack/role_mapping.yml`.
+`CONFIG_DIR/role_mapping.yml`.
 
 `follow_referrals`::
-Boolean value that specifies whether Securityshould follow referrals returned
+Specifies whether {security} should follow referrals returned
 by the LDAP server. Referrals are URLs returned by the server that are to be
-used to continue the LDAP operation (e.g. search). Defaults to `true`.
+used to continue the LDAP operation (for example, search). Defaults to `true`.
 
 `metadata`::
 A list of additional LDAP attributes that should be loaded from the
@@ -332,7 +351,9 @@ An `s` at the end indicates seconds, or `ms` indicates milliseconds.
 Defaults to `5s` (5 seconds ).
 
 `ssl.key`::
-Path to a PEM encoded file containing the private key.
+Path to a PEM encoded file containing the private key, which is used if the 
+LDAP server requires client authentication. `ssl.key` and `ssl.keystore.path` 
+cannot be used at the same time.
 
 `ssl.key_passphrase`::
 The passphrase that is used to decrypt the private key. This value is
@@ -346,7 +367,9 @@ Path to a PEM encoded file containing the certificate (or certificate chain)
 that will be presented to clients when they connect.
 
 `ssl.certificate_authorities`::
-List of paths to PEM encoded certificate files that should be trusted.
+List of paths to PEM encoded certificate files that should be trusted. 
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the 
+same time.
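As an illustration of the PEM-based trust settings, a minimal sketch for an
`ldaps` connection (the CA path and host are placeholders):

```
xpack.security.authc.realms:
  ldap1:                                   # hypothetical realm name
    type: ldap
    url: "ldaps://ldap.example.com:636"    # placeholder host
    ssl:
      certificate_authorities: [ "/path/to/ldap-ca.crt" ]  # placeholder path
      verification_mode: full
```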
 
 `ssl.keystore.path`::
 The path to the Java Keystore file that contains a private key and certificate.
@@ -370,7 +393,7 @@ The password for the key in the keystore. Defaults to the keystore password.
 
 `ssl.truststore.path`::
 The path to the Java Keystore file that contains the certificates to trust.
-`ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time.
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the same time.
 
 `ssl.truststore.password`::
 The password to the truststore.
@@ -391,18 +414,19 @@ See <> for an explanation of
 these values.
 
 `ssl.supported_protocols`::
-Supported protocols with versions. Defaults to the value of
+Supported protocols for TLS/SSL (with versions). Defaults to the value of
 `xpack.ssl.supported_protocols`.
 
-`ssl.cipher_suites`
+`ssl.cipher_suites`:: Specifies the cipher suites that should be supported when 
+communicating with the LDAP server. 
 Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[
 Java Cryptography Architecture documentation]. Defaults to the value of
 `xpack.ssl.cipher_suites`.
 
 `cache.ttl`::
-Specifies the time-to-live for cached user entries (a user and its credentials
-are cached for this period of time). Use the standard Elasticsearch
-{ref}/common-options.html#time-units[time units]). Defaults to  `20m`.
+Specifies the time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this period of time. Use the standard {es}
+<>. Defaults to `20m`.
 
 `cache.max_users`::
 Specifies the maximum number of user entries that the cache can contain.
@@ -410,8 +434,8 @@ Defaults to `100000`.
 
 `cache.hash_algo`::
 (Expert Setting) Specifies the hashing algorithm that is used for the
-in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms]
-table for all possible values). Defaults to `ssha256`.
+in-memory cached user credentials. See {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms]
+table for all possible values. Defaults to `ssha256`.
 
 [[ref-ad-settings]]
 [float]
@@ -612,8 +636,8 @@ Java Cryptography Architecture documentation]. Defaults to the value of
 `xpack.ssl.cipher_suites`.
 
 `cache.ttl`::
-Specifies the time-to-live for cached user entries (user
-credentials are cached for this configured period of time). Use the
+Specifies the time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this configured period of time. Use the
 standard Elasticsearch {ref}/common-options.html#time-units[time units]).
 Defaults to `20m`.
 
@@ -663,8 +687,9 @@ Specifies the {xpack-ref}/security-files.html[location] of the
 Defaults to `CONFIG_DIR/x-pack/role_mapping.yml`.
 
 `cache.ttl`::
-Specifies the time-to-live for cached user entries. Use the
-standard Elasticsearch {ref}/common-options.html#time-units[time units]).
+Specifies the time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this period of time. Use the
+standard {es} {ref}/common-options.html#time-units[time units].
 Defaults to `20m`.
 
 `cache.max_users`::
@@ -935,6 +960,32 @@ supported protocols for TLS/SSL.
 If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
 cipher suites that should be supported.
 
+[float]
+[[load-balancing]]
+===== Load balancing and failover
+
+The `load_balance.type` setting can have the following values: 
+
+* `failover`: The URLs are used in the order in which they are specified. 
+The first server that accepts a connection is used for all subsequent 
+connections. If the connection to that server fails, the next server that 
+accepts a connection is used for subsequent connections.
+* `dns_failover`: In this mode of operation, only a single URL may be specified.
+This URL must contain a DNS name. The system will be queried for all IP 
+addresses that correspond to this DNS name. Connections to the Active Directory 
+or LDAP server will always be tried in the order in which they were retrieved. 
+This differs from `failover` in that there is no reordering of the list and if a 
+server has failed at the beginning of the list, it will still be tried for each 
+subsequent connection.
+* `round_robin`: Connections will continuously iterate through the list of 
+provided URLs. If a server is unavailable, iterating through the list of URLs 
+will continue until a successful connection is made.
+* `dns_round_robin`: In this mode of operation, only a single URL may be 
+specified. This URL must contain a DNS name. The system will be queried for all 
+IP addresses that correspond to this DNS name. Connections will continuously 
+iterate through the list of addresses. If a server is unavailable, iterating 
+through the list of URLs will continue until a successful connection is made.
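To make these modes concrete, a hypothetical realm that round-robins across two
servers (realm name and hostnames are illustrative):

```
xpack.security.authc.realms:
  ldap1:                                   # hypothetical realm name
    type: ldap
    url: [ "ldaps://ldap1.example.com:636", "ldaps://ldap2.example.com:636" ]
    load_balance:
      type: round_robin
```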
+
 [float]
 [[ssl-tls-settings]]
 ==== Default TLS/SSL settings

From e95a7aa6f0df00e71725c4888f107ab55b35916b Mon Sep 17 00:00:00 2001
From: Lisa Cawley 
Date: Mon, 30 Apr 2018 08:52:21 -0700
Subject: [PATCH 27/68] [DOCS] Removes redundant Active Directory realm
 settings (#30190)

---
 .../active-directory-realm.asciidoc           | 178 +-----------------
 .../en/settings/security-settings.asciidoc    |  96 ++++++----
 2 files changed, 67 insertions(+), 207 deletions(-)

diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
index 2aaca6def91..143156ca636 100644
--- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
@@ -169,186 +169,14 @@ domain name from the NetBIOS name.
 ===== Load Balancing and Failover
 The `load_balance.type` setting can be used at the realm level to configure how
 {security} should interact with multiple Active Directory servers. Two modes of
-operation are supported: failover and load balancing
+operation are supported: failover and load balancing.
 
-.Load Balancing and Failover Types
-|=======================
-| Type              | | | Description
-| `failover`        | | | The URLs specified are used in the order that they are
-                          specified. The first server that can be connected to will
-                          be used for all subsequent connections. If a connection to
-                          that server fails then the next server that a connection
-                          can be established to will be used for subsequent connections.
-| `dns_failover`    | | | In this mode of operation, only a single URL may be specified.
-                          This URL must contain a DNS name. The system will be queried
-                          for all IP addresses that correspond to this DNS name.
-                          Connections to the Active Directory server will always be
-                          tried in the order in which they were retrieved. This differs
-                          from `failover` in that there is no reordering of the list
-                          and if a server has failed at the beginning of the list, it
-                          will still be tried for each subsequent connection.
-| `round_robin`     | | | Connections will continuously iterate through the list of
-                          provided URLs. If a server is unavailable, iterating through
-                          the list of URLs will continue until a successful connection
-                          is made.
-| `dns_round_robin` | | | In this mode of operation, only a single URL may be specified.
-                          This URL must contain a DNS name. The system will be queried
-                          for all IP addresses that correspond to this DNS name.
-                          Connections will continuously iterate through the list of
-                          addresses. If a server is unavailable, iterating through the
-                          list of URLs will continue until a successful connection is
-                          made.
-|=======================
+See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings].
 
 [[ad-settings]]
 ===== Active Directory Realm Settings
 
-[cols="4,^3,10"]
-|=======================
-| Setting                    | Required | Description
-| `type`                     | yes      | Indicates the realm type. Must be set to `active_directory`.
-| `order`                    | no       | Indicates the priority of this realm within the realm chain.
-                                          Realms with a lower order are consulted first. Although not
-                                          required, we recommend explicitly setting this value when
-                                          you configure multiple realms. Defaults to `Integer.MAX_VALUE`.
-| `enabled`                  | no       | Indicates whether this realm is enabled or disabled. Enables
-                                          you to disable a realm without removing its configuration.
-                                          Defaults to `true`.
-| `domain_name`              | yes      | Specifies the domain name of the Active Directory. {security}
-                                          uses the domain name to derive the LDAP URL and `user_search_dn`
-                                          if those fields are not specified.
-| `url`                      | no/yes   | Specifies an LDAP URL of the form `ldap[s]://:`.
-                                          {security} attempts to authenticate against this URL. If the
-                                          URL is not specified, it is derived from the `domain_name`,
-                                          assuming an unencrypted connection to port 389. For example,
-                                          `ldap://:389`. This settings is required when
-                                          connecting using SSL/TLS or via a custom port.
-| `bind_dn`                  | no       | The DN of the user that is used to bind to Active Directory
-                                          and perform searches. Due to its potential security
-                                          impact, `bind_dn` is not exposed via the
-                                          {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-| `bind_password`            | no       | The password for the user that is used to bind to
-                                          Active Directory. Due to its potential security impact,
-                                          `bind_password` is not exposed via the
-                                          {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-                                          *Deprecated.* Use `secure_bind_password` instead. 
-| `secure_bind_password`     | no       | ({ref}/secure-settings.html[Secure])
-                                          The password for the user that is used to bind to Active Directory.
-| `load_balance.type`        | no       | The behavior to use when there are multiple LDAP URLs defined.
-                                          For supported values see <>.
-| `load_balance.cache_ttl`   | no       | When using `dns_failover` or `dns_round_robin` as the load
-                                          balancing type, this setting controls the amount of time to
-                                          cache DNS lookups. Defaults to `1h`.
-| `user_search.base_dn`      | no       | Specifies the context to search for the user. Defaults to the
-                                          root of the Active Directory domain.
-| `user_search.scope`        | no       | Specifies whether the user search should be `sub_tree` (default),
-                                          `one_level`, or `base`. `sub_tree` searches all objects contained
-                                          under `base_dn`. `one_level` only searches users directly
-                                          contained within the `base_dn`. `base` specifies that the
-                                          `base_dn` is a user object and that it is the only user considered.
-| `user_search.filter`       | no       | Specifies a filter to use to lookup a user given a username.
-                                          The default filter looks up `user` objects with either
-                                          `sAMAccountName` or `userPrincipalName`. If specified, this
-                                          must be a valid LDAP user search filter, for example
-                                          `(&(objectClass=user)(sAMAccountName={0}))`. For more
-                                          information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
-| `user_search.upn_filter`   | no       | Specifies a filter to use to lookup a user given a user principal name.
-                                          The default filter looks up `user` objects with
-                                          a matching `userPrincipalName`. If specified, this
-                                          must be a valid LDAP user search filter, for example
-                                          `(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is
-                                          the full user principal name provided by the user. For more
-                                          information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
-| `user_search.down_level_filter` | no  | Specifies a filter to use to lookup a user given a down level logon name (DOMAIN\user).
-                                          The default filter looks up `user` objects with a matching
-                                          `sAMAccountName` in the domain provided. If specified, this
-                                          must be a valid LDAP user search filter, for example
-                                          `(&(objectClass=user)(sAMAccountName={0}))`. For more
-                                          information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
-| `user_search.pool.enabled`      | no  | Enables or disables connection pooling for user search. When
-                                          disabled a new connection is created for every search. The
-                                          default is `true` when `bind_dn` is provided.
-| `user_search.pool.size`         | no  | Specifies the maximum number of connections to Active Directory
-                                          server to allow in the connection pool. Defaults to `20`.
-| `user_search.pool.initial_size` | no  | The initial number of connections to create to Active Directory
-                                          server on startup. Defaults to `0`. Values greater than `0`
-                                          could cause startup failures if the LDAP server is down.
-| `user_search.pool.health_check.enabled` | no | Enables or disables a health check on Active Directory connections in
-                                                 the connection pool. Connections are checked in the
-                                                 background at the specified interval. Defaults to `true`.
-| `user_search.pool.health_check.dn`      | no | Specifies the distinguished name to retrieve as part of
-                                                 the health check. Defaults to the value of `bind_dn` if present, and if
-                                                 not falls back to `user_search.base_dn`.
-| `user_search.pool.health_check.interval` | no | How often to perform background checks of connections in
-                                                  the pool. Defaults to `60s`.
-| `group_search.base_dn`     | no       | Specifies the context to search for groups in which the user
-                                          has membership. Defaults to the root of the Active Directory
-                                          domain.
-| `group_search.scope`       | no       | Specifies whether the group search should be `sub_tree` (default),
-                                          `one_level` or `base`.  `sub_tree` searches all objects contained
-                                          under `base_dn`. `one_level` searches for groups directly
-                                          contained within the `base_dn`. `base` specifies that the
-                                          `base_dn` is a group object and that it is the only group considered.
-| `unmapped_groups_as_roles` | no       | Specifies whether the names of any unmapped Active Directory
-                                          groups should be used as role names and assigned to the user.
-                                          A group is considered to be _unmapped_ if it is not referenced
-                                          in any <> (API based
-                                          role-mappings are not considered).
-                                          Defaults to `false`.
-| `files.role_mapping`       | no       | Specifies the path and file name of the
-                                          <>.
-                                          Defaults to `ES_PATH_CONF/x-pack/role_mapping.yml`,
-                                          where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations)
-                                          or `/etc/elasticsearch` (package installations).
-| `follow_referrals`         | no       | Specifies whether {security} should follow referrals returned
-                                          by the Active Directory server. Referrals are URLs returned by
-                                          the server that are to be used to continue the LDAP operation
-                                          (such as `search`). Defaults to `true`.
-| `metadata`                 | no       | Specifies the list of additional LDAP attributes that should
-                                          be stored in the `metadata` of an authenticated user.
-| `ssl.key`                  | no       | Specifies the path to the PEM encoded private key to use if the Active Directory
-                                          server requires client authentication. `ssl.key` and `ssl.keystore.path` may not be used at the
-                                          same time.
-| `ssl.key_passphrase`       | no       | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted.
-| `ssl.certificate`          | no       | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the key
-                                          if the Active Directory server requires client authentication.
-| `ssl.certificate_authorities`| no     | Specifies the paths to the PEM encoded certificate authority certificates that
-                                          should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at
-                                          the same time.
-| `ssl.keystore.path`        | no       | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and
-                                          `ssl.keystore.path` may not be used at the same time.
-| `ssl.keystore.password`    | no       | The password to the keystore.
-| `ssl.keystore.key_password`| no       | The password for the key in the keystore. Defaults to the keystore password.
-| `ssl.truststore.path`      | no       | The path to the Java Keystore file that contains the certificates to trust.
-                                          `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time.
-| `ssl.truststore.password`  | no       | The password to the truststore.
-| `ssl.verification_mode`    | no       | Specifies the type of verification to be performed when
-                                          connecting to an Active Directory server using `ldaps`. When
-                                          set to `full`, the hostname or IP address used in the `url`
-                                          must match one of the names in the certificate or the
-                                          connection will not be allowed. Due to their potential security impact,
-                                          `ssl` settings are not exposed via the
-                                          {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-+
-                                          Values are `none`, `certificate`, and `full`. Defaults to `full`.
-+
-                                          See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
-                                          for an explanation of these values.
-| `ssl.supported_protocols`  | no       | Specifies the supported protocols for TLS/SSL.
-| `ssl.cipher_suites`        | no       | Specifies the cipher suites that should be supported when communicating
-                                          with the Active Directory server.
-| `cache.ttl`                | no       | Specifies the time-to-live for cached user entries. A user's
-                                          credentials are cached for this period of time. Specify the
-                                          time period using the standard Elasticsearch
-                                          {ref}/common-options.html#time-units[time units].
-                                          Defaults to `20m`.
-| `cache.max_users`          | no       | Specifies the maximum number of user entries that can be
-                                          stored in the cache at one time. Defaults to 100,000.
-| `cache.hash_algo`          | no       | Specifies the hashing algorithm that is used for the
-                                          cached user credentials.
-                                          See <> for the
-                                          possible values. (Expert Setting).
-|=======================
+See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings].
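For reference, a minimal sketch of an Active Directory realm using the
consolidated settings (realm name, domain, and host are illustrative):

```
xpack.security.authc.realms:
  ad1:                                   # hypothetical realm name
    type: active_directory
    order: 0
    domain_name: ad.example.com          # placeholder domain
    url: "ldaps://ad.example.com:636"    # placeholder host
```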
 
 [[mapping-roles-ad]]
 ==== Mapping Active Directory Users and Groups to Roles
diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc
index 97413ed07bb..139e54467b7 100644
--- a/x-pack/docs/en/settings/security-settings.asciidoc
+++ b/x-pack/docs/en/settings/security-settings.asciidoc
@@ -441,13 +441,21 @@ table for all possible values. Defaults to `ssha256`.
 [float]
 ===== Active Directory realm settings
 
+The `type` setting must be set to `active_directory`. In addition to the 
+<>, you can specify  
+the following settings: 
+
 `url`::
-A URL in the format `ldap[s]://:`. Defaults to `ldap://:389`.
+An LDAP URL of the form `ldap[s]://<server>:<port>`. {security} attempts to 
+authenticate against this URL. If the URL is not specified, it is derived from 
+the `domain_name` setting and assumes an unencrypted connection to port 389. 
+Defaults to `ldap://<domain_name>:389`. This setting is required when connecting 
+using SSL/TLS or when using a custom port.
 
 `load_balance.type`::
 The behavior to use when there are multiple LDAP URLs defined. For supported
-values see {xpack-ref}/active-directory-realm.html#ad-load-balancing[load balancing and failover types].
-Defaults to  `failover`.
+values see <<load-balancing>>.
+Defaults to `failover`.
 
 `load_balance.cache_ttl`::
 When using `dns_failover` or `dns_round_robin` as the load balancing type,
@@ -455,31 +463,34 @@ this setting controls the amount of time to cache DNS lookups. Defaults
 to `1h`.
 
 `domain_name`::
-The domain name of Active Directory. The cluster can derive the URL and
-`user_search_dn` fields from values in this element if those fields are not
-otherwise specified. Required.
+The domain name of Active Directory. If the `url` and `user_search_dn` 
+settings are not specified, the cluster can derive those values from this 
+setting. Required.
 
 `bind_dn`::
-The DN of the user that will be used to bind to Active Directory and perform searches.
-Defaults to Empty.
+The DN of the user that is used to bind to Active Directory and perform searches.
+Defaults to Empty. Due to its potential security impact, `bind_dn` is not 
+exposed via the <>.
 
 `bind_password`::
-The password for the user that will be used to bind to Active Directory.
-Defaults to Empty.
-*Deprecated.* Use `secure_bind_password` instead.
+deprecated[6.3] Use `secure_bind_password` instead. The password for the user 
+that is used to bind to Active Directory. Defaults to Empty. Due to its 
+potential security impact, `bind_password` is not exposed via the 
+<>.
 
 `secure_bind_password` (<>)::
-The password for the user that will be used to bind to Active Directory.
+The password for the user that is used to bind to Active Directory.
 Defaults to Empty.
 
 `unmapped_groups_as_roles`::
-Takes a boolean variable. When this element is set to `true`, the names of any
-LDAP groups that are not referenced in a role-mapping _file_ are used as role
-names and assigned to the user. Defaults to `false`.
+If set to `true`, the names of any unmapped Active Directory groups are used as 
+role names and assigned to the user. A group is considered _unmapped_ when it 
+is not referenced in any role-mapping files. API-based role mappings are not 
+considered. Defaults to `false`.
 
 `files.role_mapping`::
 The {xpack-ref}/security-files.html[location] for the YAML
-role mapping configuration file. Defaults to  `CONFIG_DIR/x-pack/role_mapping.yml`.
+role mapping configuration file. Defaults to `CONFIG_DIR/role_mapping.yml`.
 
 `user_search.base_dn`::
 The context to search for a user. Defaults to the root
@@ -495,22 +506,27 @@ only user considered. Defaults to `sub_tree`.
 `user_search.filter`::
 Specifies a filter to use to lookup a user given a username.  The default
 filter looks up `user` objects with either `sAMAccountName` or
-`userPrincipalName`.
+`userPrincipalName`. If specified, this must be a valid LDAP user search filter. 
+For example, `(&(objectClass=user)(sAMAccountName={0}))`. For more information, 
+see 
+https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
 
 `user_search.upn_filter`::
 Specifies a filter to use to lookup a user given a user principal name.
 The default filter looks up `user` objects with
 a matching `userPrincipalName`. If specified, this
-must be a valid LDAP user search filter, for example
+must be a valid LDAP user search filter. For example,
 `(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is the full user principal name
-provided by the user.
+provided by the user. For more information, see 
+https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
 
 `user_search.down_level_filter`::
 Specifies a filter to use to lookup a user given a down level logon name
 (DOMAIN\user). The default filter looks up `user` objects with a matching
 `sAMAccountName` in the domain provided. If specified, this
-must be a valid LDAP user search filter, for example
-`(&(objectClass=user)(sAMAccountName={0}))`.
+must be a valid LDAP user search filter. For example,
+`(&(objectClass=user)(sAMAccountName={0}))`. For more information, see 
+https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax]. 
 
 `user_search.pool.enabled`::
 Enables or disables connection pooling for user search. When
@@ -523,16 +539,18 @@ connection pool. Defaults to `20`.
 
 `user_search.pool.initial_size`::
 The initial number of connections to create to the Active Directory server on startup.
-Defaults to `0`.
+Defaults to `0`. If the LDAP server is down, values greater than `0` 
+could cause startup failures. 
 
 `user_search.pool.health_check.enabled`::
-Flag to enable or disable a health check on Active Directory connections in the connection
+Enables or disables a health check on Active Directory connections in the connection
 pool. Connections are checked in the background at the specified interval.
 Defaults to `true`.
 
 `user_search.pool.health_check.dn`::
 The distinguished name to be retrieved as part of the health check.
-Defaults to the value of `bind_dn` if it is a distinguished name.
+Defaults to the value of `bind_dn` if that setting is present. Otherwise, it 
+defaults to the value of the `user_search.base_dn` setting. 
 
 `user_search.pool.health_check.interval`::
 The interval to perform background checks of connections in the pool.
@@ -540,7 +558,7 @@ Defaults to `60s`.
 
 `group_search.base_dn`::
 The context to search for groups in which the user has membership.  Defaults
-to the root of the  Active Directory domain.
+to the root of the Active Directory domain.
 
 `group_search.scope`::
 Specifies whether the group search should be `sub_tree`, `one_level` or
@@ -570,13 +588,18 @@ Defaults to `5s` (5 seconds ).
 
 `ssl.certificate`::
 Path to a PEM encoded file containing the certificate (or certificate chain)
-that will be presented to clients when they connect.
+that will be presented to clients when they connect. 
 
 `ssl.certificate_authorities`::
-List of paths to PEM encoded certificate files that should be trusted.
+List of paths to PEM encoded certificate files that should be trusted. 
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the 
+same time.
 
 `ssl.key`::
-Path to the PEM encoded file containing the private key.
+Path to the PEM encoded file containing the private key, which is used when the 
+Active Directory server requires client authentication. `ssl.key` and 
+`ssl.keystore.path` cannot be used at the same time.
+
 
 `ssl.key_passphrase`::
 The passphrase that is used to decrypt the private key. This value is
@@ -600,6 +623,7 @@ The password to the keystore.
 
 `ssl.keystore.path`::
 The path to the Java Keystore file that contains a private key and certificate.
+`ssl.key` and `ssl.keystore.path` cannot be used at the same time.
 
 `ssl.keystore.type`::
 The format of the keystore file. Should be either `jks` to use the Java
@@ -613,6 +637,8 @@ The password to the truststore.
 
 `ssl.truststore.path`::
 The path to the Java Keystore file that contains the certificates to trust.
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the 
+same time.
 
 `ssl.truststore.type`::
 The format of the truststore file. Should be either `jks` to use the Java
@@ -627,10 +653,11 @@ See <> for an explanation of
 these values.
 
 `ssl.supported_protocols`::
-Supported protocols with versions. Defaults to the value of
+Supported protocols for TLS/SSL (with versions). Defaults to the value of
 `xpack.ssl.supported_protocols`.
 
-`ssl.cipher_suites`::
+`ssl.cipher_suites`:: Specifies the cipher suites that should be supported when 
+communicating with the Active Directory server. 
 Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[
 Java Cryptography Architecture documentation]. Defaults to the value of
 `xpack.ssl.cipher_suites`.
@@ -646,9 +673,14 @@ Specifies the maximum number of user entries that the cache can contain.
 Defaults to `100000`.
 
 `cache.hash_algo`::
-(Expert Setting) Specifies the hashing algorithm that will be used for
+(Expert Setting) Specifies the hashing algorithm that is used for
 the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for all possible values). Defaults to `ssha256`.
 
+`follow_referrals`::
+If set to `true`, {security} follows referrals returned by the LDAP server. 
+Referrals are URLs returned by the server that are to be used to continue the 
+LDAP operation (such as `search`). Defaults to `true`.
+
 [[ref-pki-settings]]
 [float]
 ===== PKI realm settings
@@ -684,7 +716,7 @@ documentation for more information. This setting cannot be used with
 `files.role_mapping`::
 Specifies the {xpack-ref}/security-files.html[location] of the
 {xpack-ref}/mapping-roles.html[YAML role  mapping configuration file].
-Defaults to `CONFIG_DIR/x-pack/role_mapping.yml`.
+Defaults to `CONFIG_DIR/role_mapping.yml`.
 
 `cache.ttl`::
 Specifies the time-to-live for cached user entries. A user and a hash of its 

From d553a8be2f00c352e7489ba6ebce755f927d9823 Mon Sep 17 00:00:00 2001
From: David Turner 
Date: Mon, 30 Apr 2018 17:31:11 +0100
Subject: [PATCH 28/68] Improve docs for disk watermarks (#30249)

* Clarify that the low watermark does not affect brand-new shards.
* Replace ES -> Elasticsearch.
* Format to 80 columns.

Resolves #25163
---
 .../modules/cluster/disk_allocator.asciidoc   | 54 ++++++++++---------
 1 file changed, 29 insertions(+), 25 deletions(-)

diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc
index 0f43d9fcd30..d93453a49e8 100644
--- a/docs/reference/modules/cluster/disk_allocator.asciidoc
+++ b/docs/reference/modules/cluster/disk_allocator.asciidoc
@@ -1,9 +1,9 @@
 [[disk-allocator]]
 === Disk-based Shard Allocation
 
-Elasticsearch factors in the available disk space on a node before deciding
-whether to allocate new shards to that node or to actively relocate shards
-away from that node.
+Elasticsearch considers the available disk space on a node before deciding
+whether to allocate new shards to that node or to actively relocate shards away
+from that node.
 
 Below are the settings that can be configured in the `elasticsearch.yml` config
 file or updated dynamically on a live cluster with the
@@ -15,29 +15,33 @@ file or updated dynamically on a live cluster with the
 
 `cluster.routing.allocation.disk.watermark.low`::
 
-    Controls the low watermark for disk usage. It defaults to 85%, meaning ES will
-    not allocate new shards to nodes once they have more than 85% disk used. It
-    can also be set to an absolute byte value (like 500mb) to prevent ES from
-    allocating shards if less than the configured amount of space is available.
+    Controls the low watermark for disk usage. It defaults to `85%`, meaning
+    that Elasticsearch will not allocate shards to nodes that have more than
+    85% disk used. It can also be set to an absolute byte value (like `500mb`)
+    to prevent Elasticsearch from allocating shards if less than the specified
+    amount of space is available. This setting has no effect on the primary
+    shards of newly-created indices or, specifically, any shards that have
+    never previously been allocated.
 
 `cluster.routing.allocation.disk.watermark.high`::
 
-    Controls the high watermark. It defaults to 90%, meaning ES will attempt to
-    relocate shards to another node if the node disk usage rises above 90%. It can
-    also be set to an absolute byte value (similar to the low watermark) to
-    relocate shards once less than the configured amount of space is available on
-    the node.
+    Controls the high watermark. It defaults to `90%`, meaning that
+    Elasticsearch will attempt to relocate shards away from a node whose disk
+    usage is above 90%. It can also be set to an absolute byte value (similarly
+    to the low watermark) to relocate shards away from a node if it has less
+    than the specified amount of free space. This setting affects the
+    allocation of all shards, whether previously allocated or not.
 
 `cluster.routing.allocation.disk.watermark.flood_stage`::
 +
 --
-Controls the flood stage watermark. It defaults to 95%, meaning ES enforces
-a read-only index block (`index.blocks.read_only_allow_delete`) on every
-index that has one or more shards allocated on the node that has at least
-one disk exceeding the flood stage.  This is a last resort to prevent nodes
-from running out of disk space.  The index block must be released manually
-once there is enough disk space available to allow indexing operations to
-continue.
+Controls the flood stage watermark. It defaults to `95%`, meaning that
+Elasticsearch enforces a read-only index block
+(`index.blocks.read_only_allow_delete`) on every index that has one or more
+shards allocated on the node that has at least one disk exceeding the flood
+stage. This is a last resort to prevent nodes from running out of disk space.
+The index block must be released manually once there is enough disk space
+available to allow indexing operations to continue.
 
 NOTE: You can not mix the usage of percentage values and byte values within
 these settings. Either all are set to percentage values, or all are set to byte
@@ -67,12 +71,12 @@ PUT /twitter/_settings
 `cluster.routing.allocation.disk.include_relocations`::
 
     Defaults to +true+, which means that Elasticsearch will take into account
-    shards that are currently being relocated to the target node when computing a
-    node's disk usage. Taking relocating shards' sizes into account may, however,
-    mean that the disk usage for a node is incorrectly estimated on the high side,
-    since the relocation could be 90% complete and a recently retrieved disk usage
-    would include the total size of the relocating shard as well as the space
-    already used by the running relocation.
+    shards that are currently being relocated to the target node when computing
+    a node's disk usage. Taking relocating shards' sizes into account may,
+    however, mean that the disk usage for a node is incorrectly estimated on
+    the high side, since the relocation could be 90% complete and a recently
+    retrieved disk usage would include the total size of the relocating shard
+    as well as the space already used by the running relocation.
 
 
 NOTE: Percentage values refer to used disk space, while byte values refer to

From 421bd9bd7a6a0914f9abf6d8fc1e63995b29e369 Mon Sep 17 00:00:00 2001
From: Chris Earle 
Date: Mon, 30 Apr 2018 14:53:48 -0400
Subject: [PATCH 29/68] _cluster/state Skip Test for pre-6.4, not pre-7.0
 (#30264)

This updates the skip sections for the new `_cluster/state` responses so that
the tests also run against 6.4+ now that the change has been backported.
---
 .../resources/rest-api-spec/test/cluster.state/10_basic.yml     | 2 +-
 .../resources/rest-api-spec/test/cluster.state/20_filtering.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml
index c38a33fdff1..ae9637c08dd 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml
@@ -22,7 +22,7 @@
 ---
 "get cluster state returns cluster_uuid at the top level":
   - skip:
-      version:  " - 6.99.99"
+      version:  " - 6.3.99"
       reason:   "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"
 
   - do:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml
index f9e5a0c03df..880efaff19a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml
@@ -160,7 +160,7 @@ setup:
 ---
 "Filtering the cluster state returns cluster_uuid at the top level regardless of metric filters":
   - skip:
-      version:  " - 6.99.99"
+      version:  " - 6.3.99"
       reason:   "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"
 
   - do:

From 225f7093a905ec29b0793751801d24db2e3209df Mon Sep 17 00:00:00 2001
From: David Roberts 
Date: Mon, 30 Apr 2018 20:05:27 +0100
Subject: [PATCH 30/68] [ML] Include 3rd party C++ component notices (#30132)

The overall NOTICE file for the ML X-Pack module should
include the notices from the 3rd party C++ components as
well as the 3rd party Java components.
---
 distribution/archives/build.gradle | 18 ++++++++++++++++++
 x-pack/plugin/ml/build.gradle      | 19 ++++++++++++++++++-
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index f2fc297a9e4..9fa06021236 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -217,6 +217,24 @@ subprojects {
   }
   check.dependsOn checkNotice
 
+  if (project.name == 'zip' || project.name == 'tar') {
+    task checkMlCppNotice {
+      dependsOn buildDist, checkExtraction
+      onlyIf toolExists
+      doLast {
+        // this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines
+        final List<String> expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003")
+        final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt")
+        final List<String> actualLines = Files.readAllLines(noticePath)
+        for (final String expectedLine : expectedLines) {
+          if (actualLines.contains(expectedLine) == false) {
+            throw new GradleException("expected [${noticePath}] to contain [${expectedLine}] but it did not")
+          }
+        }
+      }
+    }
+    check.dependsOn checkMlCppNotice
+  }
 }
 
 /*****************************************************************************
diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle
index af2122d43d9..d9d4882b00e 100644
--- a/x-pack/plugin/ml/build.gradle
+++ b/x-pack/plugin/ml/build.gradle
@@ -64,6 +64,23 @@ artifacts {
     testArtifacts testJar
 }
 
+task extractNativeLicenses(type: Copy) {
+    dependsOn configurations.nativeBundle
+    into "${buildDir}"
+    from {
+        project.zipTree(configurations.nativeBundle.singleFile)
+    }
+    include 'platform/licenses/**'
+}
+project.afterEvaluate {
+    // Add an extra licenses directory to the combined notices
+    project.tasks.findByName('generateNotice').dependsOn extractNativeLicenses
+    project.tasks.findByName('generateNotice').licensesDir new File("${project.buildDir}/platform/licenses")
+    project.tasks.findByName('generateNotice').outputs.upToDateWhen {
+        extractNativeLicenses.state.upToDate
+    }
+}
+
 run {
     plugin xpackModule('core')
 }
@@ -85,7 +102,7 @@ task internalClusterTest(type: RandomizedTestingTask,
   include '**/*IT.class'
   systemProperty 'es.set.netty.runtime.available.processors', 'false'
 }
-check.dependsOn internalClusterTest 
+check.dependsOn internalClusterTest
 internalClusterTest.mustRunAfter test
 
 // also add an "alias" task to make typing on the command line easier

From 51902238f3c639324bf4607e24a47f76b37c8eac Mon Sep 17 00:00:00 2001
From: lcawley 
Date: Mon, 30 Apr 2018 13:20:22 -0700
Subject: [PATCH 31/68] [DOCS] Fixes syskeygen command name

---
 x-pack/docs/en/watcher/encrypting-data.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/docs/en/watcher/encrypting-data.asciidoc b/x-pack/docs/en/watcher/encrypting-data.asciidoc
index ca06d9666bb..166ef6f14d7 100644
--- a/x-pack/docs/en/watcher/encrypting-data.asciidoc
+++ b/x-pack/docs/en/watcher/encrypting-data.asciidoc
@@ -8,7 +8,7 @@ cluster.
 
 To encrypt sensitive data in {watcher}:
 
-. Use the {ref}/syskeygen.html[syskeygen] command to create a system key file.
+. Use the {ref}/syskeygen.html[elasticsearch-syskeygen] command to create a system key file.
 
 . Copy the `system_key` file to all of the nodes in your cluster.
 +

From e11070bcfaed301d1c0e9358518eebacd9f9992e Mon Sep 17 00:00:00 2001
From: Paul Sanwald 
Date: Mon, 30 Apr 2018 14:09:32 -0700
Subject: [PATCH 32/68] Fix macros in changelog (#30269)

Remove the comment markers from the `:issue:` and `:pull:` attribute
definitions; leaving them commented out caused the link macros not to work
correctly.
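
With the attributes defined, entries elsewhere in the changelog can use the
shorthand link macros. A hypothetical entry using this very pull request:

```
// hypothetical changelog entry relying on the now-active attributes
* Fix macros in changelog ({pull}30269[#30269])
```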
---
 docs/CHANGELOG.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc
index b46799d7a8e..1ef6534616f 100644
--- a/docs/CHANGELOG.asciidoc
+++ b/docs/CHANGELOG.asciidoc
@@ -1,7 +1,7 @@
 // Use these for links to issue and pulls. Note issues and pulls redirect one to
 // each other on Github, so don't worry too much on using the right prefix.
-// :issue: https://github.com/elastic/elasticsearch/issues/
-// :pull: https://github.com/elastic/elasticsearch/pull/
+:issue: https://github.com/elastic/elasticsearch/issues/
+:pull: https://github.com/elastic/elasticsearch/pull/
 
 = Elasticsearch Release Notes
 

From 65e5868a55d66b1a930b973b87258209a27836db Mon Sep 17 00:00:00 2001
From: Andy Bristol 
Date: Mon, 30 Apr 2018 16:35:26 -0700
Subject: [PATCH 33/68] [test] add java packaging test project (#30161)

Adds a project for building and running packaging tests written in Java
for portability. The vagrant tasks use the jars on the packagingTest
configuration, which are built in the same project. No tests are added
yet.

Corresponding changes are not made to :x-pack:qa:vagrant because the
java packaging tests will all be consolidated into one project.

For #26741
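
The generated runner script reduces to a single JUnitCore invocation over the
jars copied to the VM, roughly (see createTestRunnerScript in the diff below):

```
# sketch of the generated $PACKAGING_TESTS/run-tests.sh for the configured test class
java -cp "$PACKAGING_TESTS/*" org.junit.runner.JUnitCore org.elasticsearch.packaging.PackagingTests
```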
---
 Vagrantfile                                   |  2 +
 .../vagrant/VagrantPropertiesExtension.groovy |  3 +
 .../gradle/vagrant/VagrantTestPlugin.groovy   | 58 ++++++++++++++-----
 qa/vagrant/build.gradle                       | 38 +++++++++++-
 .../packaging/PackagingTests.java             | 31 ++++++++++
 5 files changed, 114 insertions(+), 18 deletions(-)
 create mode 100644 qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java

diff --git a/Vagrantfile b/Vagrantfile
index 6f81ba0273c..6761fec07da 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -337,6 +337,7 @@ export BATS=/project/build/bats
 export BATS_UTILS=/project/build/packaging/bats/utils
 export BATS_TESTS=/project/build/packaging/bats/tests
 export PACKAGING_ARCHIVES=/project/build/packaging/archives
+export PACKAGING_TESTS=/project/build/packaging/tests
 VARS
    cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
 Defaults   env_keep += "ZIP"
@@ -347,6 +348,7 @@ Defaults   env_keep += "BATS"
 Defaults   env_keep += "BATS_UTILS"
 Defaults   env_keep += "BATS_TESTS"
 Defaults   env_keep += "PACKAGING_ARCHIVES"
+Defaults   env_keep += "PACKAGING_TESTS"
 SUDOERS_VARS
     chmod 0440 /etc/sudoers.d/elasticsearch_vars
   SHELL
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
index 264a1e0f8ac..e9b664a5a31 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
@@ -41,6 +41,9 @@ class VagrantPropertiesExtension {
     @Input
     Boolean inheritTestUtils
 
+    @Input
+    String testClass
+
     VagrantPropertiesExtension(List<String> availableBoxes) {
         this.boxes = availableBoxes
         this.batsDir = 'src/test/resources/packaging'
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
index 7a0b9f96781..bb85359ae3f 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
@@ -51,6 +51,7 @@ class VagrantTestPlugin implements Plugin<Project> {
     static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
 
     private static final PACKAGING_CONFIGURATION = 'packaging'
+    private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest'
     private static final BATS = 'bats'
     private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
     private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest"
@@ -66,6 +67,7 @@ class VagrantTestPlugin implements Plugin {
 
         // Creates custom configurations for Bats testing files (and associated scripts and archives)
         createPackagingConfiguration(project)
+        project.configurations.create(PACKAGING_TEST_CONFIGURATION)
 
         // Creates all the main Vagrant tasks
         createVagrantTasks(project)
@@ -144,10 +146,12 @@ class VagrantTestPlugin implements Plugin {
     }
 
     private static void createCleanTask(Project project) {
-        project.tasks.create('clean', Delete.class) {
-            description 'Clean the project build directory'
-            group 'Build'
-            delete project.buildDir
+        if (project.tasks.findByName('clean') == null) {
+            project.tasks.create('clean', Delete.class) {
+                description 'Clean the project build directory'
+                group 'Build'
+                delete project.buildDir
+            }
         }
     }
 
@@ -174,6 +178,18 @@ class VagrantTestPlugin implements Plugin {
             from project.configurations[PACKAGING_CONFIGURATION]
         }
 
+        File testsDir = new File(packagingDir, 'tests')
+        Copy copyPackagingTests = project.tasks.create('copyPackagingTests', Copy) {
+            into testsDir
+            from project.configurations[PACKAGING_TEST_CONFIGURATION]
+        }
+
+        Task createTestRunnerScript = project.tasks.create('createTestRunnerScript', FileContentsTask) {
+            dependsOn copyPackagingTests
+            file "${testsDir}/run-tests.sh"
+            contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}"
+        }
+
         Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
             dependsOn copyPackagingArchives
             file "${archivesDir}/version"
@@ -234,7 +250,8 @@ class VagrantTestPlugin implements Plugin {
 
         Task vagrantSetUpTask = project.tasks.create('setupPackagingTest')
         vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
-        vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
+        vagrantSetUpTask.dependsOn copyPackagingArchives, copyPackagingTests, createTestRunnerScript
+        vagrantSetUpTask.dependsOn createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
         vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils
     }
 
@@ -393,20 +410,29 @@ class VagrantTestPlugin implements Plugin {
                 packagingTest.dependsOn(batsPackagingTest)
             }
 
-            // This task doesn't do anything yet. In the future it will execute a jar containing tests on the vm
-            Task groovyPackagingTest = project.tasks.create("vagrant${boxTask}#groovyPackagingTest")
-            groovyPackagingTest.dependsOn(up)
-            groovyPackagingTest.finalizedBy(halt)
-
-            TaskExecutionAdapter groovyPackagingReproListener = createReproListener(project, groovyPackagingTest.path)
-            groovyPackagingTest.doFirst {
-                project.gradle.addListener(groovyPackagingReproListener)
+            Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) {
+                command 'ssh'
+                boxName box
+                environmentVars vagrantEnvVars
+                dependsOn up, setupPackagingTest
+                finalizedBy halt
+                args '--command', "bash \"\$PACKAGING_TESTS/run-tests.sh\""
             }
-            groovyPackagingTest.doLast {
-                project.gradle.removeListener(groovyPackagingReproListener)
+
+            // todo remove this onlyIf after all packaging tests are consolidated
+            javaPackagingTest.onlyIf {
+                project.extensions.esvagrant.testClass != null
+            }
+
+            TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path)
+            javaPackagingTest.doFirst {
+                project.gradle.addListener(javaPackagingReproListener)
+            }
+            javaPackagingTest.doLast {
+                project.gradle.removeListener(javaPackagingReproListener)
             }
             if (project.extensions.esvagrant.boxes.contains(box)) {
-                packagingTest.dependsOn(groovyPackagingTest)
+                packagingTest.dependsOn(javaPackagingTest)
             }
 
             Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 2b1ffb28081..52a6bb1efb5 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -1,3 +1,5 @@
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -17,8 +19,23 @@
  * under the License.
  */
 
-apply plugin: 'elasticsearch.vagrantsupport'
-apply plugin: 'elasticsearch.vagrant'
+plugins {
+  id 'java'
+  id 'elasticsearch.build'
+  id 'elasticsearch.vagrantsupport'
+  id 'elasticsearch.vagrant'
+}
+
+dependencies {
+  compile "junit:junit:${versions.junit}"
+  compile "org.hamcrest:hamcrest-core:${versions.hamcrest}"
+
+  // needs to be on the classpath for JarHell
+  testRuntime project(':libs:elasticsearch-core')
+
+  // pulls in the jar built by this project and its dependencies
+  packagingTest project(path: project.path, configuration: 'runtime')
+}
 
 List plugins = []
 for (Project subproj : project.rootProject.subprojects) {
@@ -39,3 +56,20 @@ setupPackagingTest {
     expectedPlugins.setText(plugins.join('\n'), 'UTF-8')
   }
 }
+
+esvagrant {
+  testClass 'org.elasticsearch.packaging.PackagingTests'
+}
+
+forbiddenApisMain {
+  signaturesURLs = [
+    PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')
+  ]
+}
+
+// we don't have additional tests for the tests themselves
+tasks.test.enabled = false
+
+// this project doesn't get published
+tasks.dependencyLicenses.enabled = false
+tasks.dependenciesInfo.enabled = false
diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java
new file mode 100644
index 00000000000..0b5e7a3b6e0
--- /dev/null
+++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.packaging;
+
+import org.junit.Test;
+
+/**
+ * This class doesn't have any tests yet
+ */
+public class PackagingTests {
+
+    @Test
+    public void testDummy() {}
+}

From 4a537ef03cc83ad4774258216a819ae8e549edae Mon Sep 17 00:00:00 2001
From: Boaz Leskes 
Date: Tue, 1 May 2018 08:15:02 +0200
Subject: [PATCH 34/68] Bulk operations fail to replicate operations when a
 mapping update times out (#30244)

Starting with the refactoring in https://github.com/elastic/elasticsearch/pull/22778 (released in 5.3), we may fail to properly replicate operations when a mapping update on the master fails. If a bulk
operation needs a mapping update halfway through, it will send a request to the master before continuing
to index the operations. If that request times out or isn't acked (i.e., even a single node in the
cluster didn't process it within 30s), we end up throwing the exception and aborting the entire bulk.
This is a problem because none of the operations processed so far are replicated to the replicas.
Although these operations were never "acked" to the user (we threw an error), this causes the local
checkpoint on the replicas to lag (on 6.x) and the primary and replica to diverge.

This PR does a couple of things:
1) Most importantly, treats *any* mapping update failure as a document-level failure, meaning only
    the relevant indexing operation will fail.
2) Removes the mapping update callbacks from `IndexShard.applyIndexOperationOnPrimary` and
    similar methods for simpler execution. We no longer use exceptions when a mapping
    update was successful.

I think we need to do more work here (the fact that a single slow node can prevent those mapping
updates from being acked and thus fail operations is bad), but I want to keep this change as small
as possible (it is already too big).
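
For illustration, the retry-once flow that point 1 introduces on the primary
can be sketched as below. This is a simplified sketch, not the exact code:
`tryOperation`, `updateMappingsOnMaster` and `documentLevelFailure` are
stand-ins for the real methods in the diff that follows.

```
// Run the operation; the engine now reports a required mapping update as a
// result type instead of throwing an exception.
Engine.Result result = tryOperation();
if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
    try {
        // Ask the master to apply the dynamic mapping update; this may time out.
        updateMappingsOnMaster(result.getRequiredMappingUpdate());
    } catch (Exception e) {
        // A failed mapping update now fails only this document, not the whole bulk.
        return documentLevelFailure(e);
    }
    // Retry the operation once, now that the mapping should be present.
    result = tryOperation();
    if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
        // The update was acked but isn't visible locally yet; retry the whole request.
        throw new ReplicationOperation.RetryOnPrimaryException(shardId,
            "Dynamic mappings are not available on the node that holds the primary yet");
    }
}
return result;
```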
---
 .../action/bulk/MappingUpdatePerformer.java   |   7 -
 .../action/bulk/TransportShardBulkAction.java | 149 +++++++-----
 .../TransportResyncReplicationAction.java     |  10 +-
 .../replication/TransportWriteAction.java     |   8 +-
 .../elasticsearch/index/IndexingSlowLog.java  |   2 +-
 .../elasticsearch/index/engine/Engine.java    |  64 +++++-
 .../index/engine/InternalEngine.java          |   8 +-
 .../elasticsearch/index/shard/IndexShard.java |  66 +++---
 .../index/shard/InternalIndexingStats.java    |  56 +++--
 .../indices/IndexingMemoryController.java     |   4 +-
 .../indices/recovery/RecoveryTarget.java      |   7 +-
 .../bulk/TransportShardBulkActionTests.java   | 194 ++++++----------
 .../discovery/MasterDisruptionIT.java         |  57 +++++
 .../index/engine/InternalEngineTests.java     |  34 +--
 .../index/mapper/TextFieldMapperTests.java    |   4 +-
 .../IndexLevelReplicationTests.java           |   8 +-
 .../RecoveryDuringReplicationTests.java       |   9 +-
 .../index/shard/IndexShardIT.java             |   8 +-
 .../index/shard/IndexShardTests.java          | 214 +++++++++---------
 .../shard/IndexingOperationListenerTests.java |  26 ++-
 .../shard/PrimaryReplicaSyncerTests.java      |   8 +-
 .../PeerRecoveryTargetServiceTests.java       |   4 +-
 .../indices/recovery/RecoveryTests.java       |  18 +-
 .../BlobStoreRepositoryRestoreTests.java      |   6 +-
 .../index/shard/IndexShardTestCase.java       |  44 ++--
 .../test/ESSingleNodeTestCase.java            |   4 +-
 .../BlockMasterServiceOnMaster.java           | 107 +++++++++
 .../watcher/WatcherIndexingListenerTests.java |   5 +-
 28 files changed, 662 insertions(+), 469 deletions(-)
 create mode 100644 test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java

diff --git a/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java b/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java
index 7f16b7c4d6d..1f228b0f355 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java
@@ -29,11 +29,4 @@ public interface MappingUpdatePerformer {
      */
     void updateMappings(Mapping update, ShardId shardId, String type);
 
-    /**
-     *  Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the operation needs to be
-     * retried on the primary due to the mappings not being present yet, or a different exception if
-     * updating the mappings on the master failed.
-     */
-    void verifyMappings(Mapping update, ShardId shardId);
-
 }
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 260c75692e1..57ecb250f7c 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -43,6 +43,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.CheckedSupplier;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
@@ -64,7 +65,9 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.Map;
+import java.util.function.Function;
 import java.util.function.LongSupplier;
 
 /** Performs shard-level bulk (index, delete or update) operations */
@@ -137,12 +140,15 @@ public class TransportShardBulkAction extends TransportWriteAction {
-                        throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
-                            "Mappings are not available on the replica yet, triggered update: " + update);
-                    });
+                    indexRequest.isRetry(), sourceToParse);
+                break;
             case DELETE:
                 DeleteRequest deleteRequest = (DeleteRequest) docWriteRequest;
-                return replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(),
-                    deleteRequest.type(), deleteRequest.id(), deleteRequest.versionType().versionTypeForReplicationAndRecovery(),
-                    update -> {
-                        throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
-                            "Mappings are not available on the replica yet, triggered update: " + update);
-                    });
+                result =  replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(),
+                    deleteRequest.type(), deleteRequest.id(), deleteRequest.versionType().versionTypeForReplicationAndRecovery());
+                break;
             default:
                 throw new IllegalStateException("Unexpected request operation type on replica: "
                     + docWriteRequest.opType().getLowercase());
         }
+        if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+            throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
+                "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate());
+        }
+        return result;
     }
 
     /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */
@@ -539,50 +555,61 @@ public class TransportShardBulkAction extends TransportWriteAction {
-                    mappingUpdater.updateMappings(update, primary.shardId(), sourceToParse.type());
-                    throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated");
-                });
-        } catch (ReplicationOperation.RetryOnPrimaryException e) {
-            return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse,
-                request.getAutoGeneratedTimestamp(), request.isRetry(), update -> mappingUpdater.verifyMappings(update, primary.shardId()));
-        }
+        return executeOnPrimaryWhileHandlingMappingUpdates(primary.shardId(), request.type(),
+            () ->
+                primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse,
+                    request.getAutoGeneratedTimestamp(), request.isRetry()),
+            e -> new Engine.IndexResult(e, request.version()),
+            mappingUpdater);
     }
 
     private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary,
                                                                      MappingUpdatePerformer mappingUpdater) throws Exception {
-        try {
-            return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(),
-                update -> {
-                    mappingUpdater.updateMappings(update, primary.shardId(), request.type());
-                    throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated");
-                });
-        } catch (ReplicationOperation.RetryOnPrimaryException e) {
-            return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(),
-                update -> mappingUpdater.verifyMappings(update, primary.shardId()));
+        return executeOnPrimaryWhileHandlingMappingUpdates(primary.shardId(), request.type(),
+            () -> primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType()),
+            e -> new Engine.DeleteResult(e, request.version()),
+            mappingUpdater);
+    }
+
+    private static <T extends Engine.Result> T executeOnPrimaryWhileHandlingMappingUpdates(ShardId shardId, String type,
+                                                                                           CheckedSupplier<T, IOException> toExecute,
+                                                                                           Function<Exception, T> onError,
+                                                                                           MappingUpdatePerformer mappingUpdater)
+        throws IOException {
+        T result = toExecute.get();
+        if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+            // try to update the mappings and try again.
+            try {
+                mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), shardId, type);
+            } catch (Exception e) {
+                // failure to update the mapping should translate to a failure of specific requests. Other requests
+                // still need to be executed and replicated.
+                return onError.apply(e);
+            }
+
+            result = toExecute.get();
+
+            if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+                // double mapping update. We assume that the successful mapping update wasn't yet processed on the node
+                // and retry the entire request again.
+                throw new ReplicationOperation.RetryOnPrimaryException(shardId,
+                    "Dynamic mappings are not available on the node that holds the primary yet");
+            }
         }
+        assert result.getFailure() instanceof ReplicationOperation.RetryOnPrimaryException == false :
+            "IndexShard shouldn't use RetryOnPrimaryException. got " + result.getFailure();
+        return result;
+
     }
 
     class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer {
 
         public void updateMappings(final Mapping update, final ShardId shardId, final String type) {
-            if (update != null) {
-                // can throw timeout exception when updating mappings or ISE for attempting to
-                // update default mappings which are bubbled up
-                mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update);
-            }
-        }
-
-        public void verifyMappings(final Mapping update, final ShardId shardId) {
-            if (update != null) {
-                throw new ReplicationOperation.RetryOnPrimaryException(shardId,
-                        "Dynamic mappings are not available on the node that holds the primary yet");
-            }
+            assert update != null;
+            assert shardId != null;
+            // can throw timeout exception when updating mappings or ISE for attempting to
+            // update default mappings which are bubbled up
+            mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
index c182fb24ffb..3dd2bd4df58 100644
--- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
@@ -122,11 +122,11 @@ public class TransportResyncReplicationAction extends TransportWriteAction {
-                        throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
-                            "Mappings are not available on the replica yet, triggered update: " + update);
-                    });
+                final Engine.Result operationResult = replica.applyTranslogOperation(operation, Engine.Operation.Origin.REPLICA);
+                if (operationResult.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+                    throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
+                        "Mappings are not available on the replica yet, triggered update: " + operationResult.getRequiredMappingUpdate());
+                }
                 location = syncOperationResultOrThrow(operationResult, location);
             } catch (Exception e) {
                 // if its not a failure to be ignored, let it bubble up
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
index bdddcddaa2e..b14fd156b73 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
@@ -33,23 +33,17 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.Translog.Location;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;
 
-import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
@@ -78,7 +72,7 @@ public abstract class TransportWriteAction<
     protected static Location syncOperationResultOrThrow(final Engine.Result operationResult,
                                                          final Location currentLocation) throws Exception {
         final Location location;
-        if (operationResult.hasFailure()) {
+        if (operationResult.getFailure() != null) {
             // check if any transient write operation failures should be bubbled up
             Exception failure = operationResult.getFailure();
             assert failure instanceof MapperParsingException : "expected mapper parsing failures. got " + failure;
diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
index b75cda5b6ca..8293f873c65 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java
@@ -144,7 +144,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
 
     @Override
     public void postIndex(ShardId shardId, Engine.Index indexOperation, Engine.IndexResult result) {
-        if (result.hasFailure() == false) {
+        if (result.getResultType() == Engine.Result.Type.SUCCESS) {
             final ParsedDocument doc = indexOperation.parsedDoc();
             final long tookInNanos = result.getTook();
             if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index 4c782cb5004..e1ebd0ecc29 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -58,6 +58,7 @@ import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ReleasableLock;
 import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.merge.MergeStats;
@@ -295,27 +296,45 @@ public abstract class Engine implements Closeable {
      **/
     public abstract static class Result {
         private final Operation.TYPE operationType;
+        private final Result.Type resultType;
         private final long version;
         private final long seqNo;
         private final Exception failure;
         private final SetOnce<Boolean> freeze = new SetOnce<>();
+        private final Mapping requiredMappingUpdate;
         private Translog.Location translogLocation;
         private long took;
 
         protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) {
             this.operationType = operationType;
-            this.failure = failure;
+            this.failure = Objects.requireNonNull(failure);
             this.version = version;
             this.seqNo = seqNo;
+            this.requiredMappingUpdate = null;
+            this.resultType = Type.FAILURE;
         }
 
         protected Result(Operation.TYPE operationType, long version, long seqNo) {
-            this(operationType, null, version, seqNo);
+            this.operationType = operationType;
+            this.version = version;
+            this.seqNo = seqNo;
+            this.failure = null;
+            this.requiredMappingUpdate = null;
+            this.resultType = Type.SUCCESS;
         }
 
-        /** whether the operation had failure */
-        public boolean hasFailure() {
-            return failure != null;
+        protected Result(Operation.TYPE operationType, Mapping requiredMappingUpdate) {
+            this.operationType = operationType;
+            this.version = Versions.NOT_FOUND;
+            this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
+            this.failure = null;
+            this.requiredMappingUpdate = requiredMappingUpdate;
+            this.resultType = Type.MAPPING_UPDATE_REQUIRED;
+        }
+
+        /** whether the operation was successful, failed, or was aborted because a mapping update is required */
+        public Type getResultType() {
+            return resultType;
         }
 
         /** get the updated document version */
@@ -332,6 +351,14 @@ public abstract class Engine implements Closeable {
             return seqNo;
         }
 
+        /**
+         * If the operation was aborted due to missing mappings, this method will return the mappings
+         * that are required to complete the operation.
+         */
+        public Mapping getRequiredMappingUpdate() {
+            return requiredMappingUpdate;
+        }
+
         /** get the translog location after executing the operation */
         public Translog.Location getTranslogLocation() {
             return translogLocation;
@@ -371,6 +398,11 @@ public abstract class Engine implements Closeable {
             freeze.set(true);
         }
 
+        public enum Type {
+            SUCCESS,
+            FAILURE,
+            MAPPING_UPDATE_REQUIRED
+        }
     }
 
     public static class IndexResult extends Result {
@@ -383,9 +415,8 @@ public abstract class Engine implements Closeable {
         }
 
         /**
-         * use in case of index operation failed before getting to internal engine
-         * (e.g while preparing operation or updating mappings)
-         * */
+         * use in case the index operation failed before getting to the internal engine
+         **/
         public IndexResult(Exception failure, long version) {
             this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO);
         }
@@ -395,6 +426,11 @@ public abstract class Engine implements Closeable {
             this.created = false;
         }
 
+        public IndexResult(Mapping requiredMappingUpdate) {
+            super(Operation.TYPE.INDEX, requiredMappingUpdate);
+            this.created = false;
+        }
+
         public boolean isCreated() {
             return created;
         }
@@ -410,11 +446,23 @@ public abstract class Engine implements Closeable {
             this.found = found;
         }
 
+        /**
+         * use in case the delete operation failed before getting to the internal engine
+         **/
+        public DeleteResult(Exception failure, long version) {
+            this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
+        }
+
         public DeleteResult(Exception failure, long version, long seqNo, boolean found) {
             super(Operation.TYPE.DELETE, failure, version, seqNo);
             this.found = found;
         }
 
+        public DeleteResult(Mapping requiredMappingUpdate) {
+            super(Operation.TYPE.DELETE, requiredMappingUpdate);
+            this.found = false;
+        }
+
         public boolean isFound() {
             return found;
         }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index f89595c1c23..bca84f81a29 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -765,7 +765,7 @@ public class InternalEngine extends Engine {
                 final IndexResult indexResult;
                 if (plan.earlyResultOnPreFlightError.isPresent()) {
                     indexResult = plan.earlyResultOnPreFlightError.get();
-                    assert indexResult.hasFailure();
+                    assert indexResult.getResultType() == Result.Type.FAILURE : indexResult.getResultType();
                 } else if (plan.indexIntoLucene) {
                     indexResult = indexIntoLucene(index, plan);
                 } else {
@@ -774,7 +774,7 @@ public class InternalEngine extends Engine {
                 }
                 if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                     final Translog.Location location;
-                    if (indexResult.hasFailure() == false) {
+                    if (indexResult.getResultType() == Result.Type.SUCCESS) {
                         location = translog.add(new Translog.Index(index, indexResult));
                     } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                         // if we have document failure, record it as a no-op in the translog with the generated seq_no
@@ -784,7 +784,7 @@ public class InternalEngine extends Engine {
                     }
                     indexResult.setTranslogLocation(location);
                 }
-                if (plan.indexIntoLucene && indexResult.hasFailure() == false) {
+                if (plan.indexIntoLucene && indexResult.getResultType() == Result.Type.SUCCESS) {
                     final Translog.Location translogLocation = trackTranslogLocation.get() ? indexResult.getTranslogLocation() : null;
                     versionMap.maybePutIndexUnderLock(index.uid().bytes(),
                         new IndexVersionValue(translogLocation, plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm()));
@@ -1087,7 +1087,7 @@ public class InternalEngine extends Engine {
             }
             if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                 final Translog.Location location;
-                if (deleteResult.hasFailure() == false) {
+                if (deleteResult.getResultType() == Result.Type.SUCCESS) {
                     location = translog.add(new Translog.Delete(delete, deleteResult));
                 } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                     location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(),
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index def6362e334..92240e9b463 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -636,22 +636,21 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
 
     public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType versionType, SourceToParse sourceToParse,
-                                                           long autoGeneratedTimestamp, boolean isRetry,
-                                                           Consumer onMappingUpdate) throws IOException {
+                                                           long autoGeneratedTimestamp, boolean isRetry) throws IOException {
         return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp,
-            isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse, onMappingUpdate);
+            isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse);
     }
 
     public Engine.IndexResult applyIndexOperationOnReplica(long seqNo, long version, VersionType versionType,
-                                                           long autoGeneratedTimeStamp, boolean isRetry, SourceToParse sourceToParse,
-                                                           Consumer onMappingUpdate) throws IOException {
+                                                           long autoGeneratedTimeStamp, boolean isRetry, SourceToParse sourceToParse)
+        throws IOException {
         return applyIndexOperation(seqNo, primaryTerm, version, versionType, autoGeneratedTimeStamp, isRetry,
-            Engine.Operation.Origin.REPLICA, sourceToParse, onMappingUpdate);
+            Engine.Operation.Origin.REPLICA, sourceToParse);
     }
 
     private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, VersionType versionType,
                                                    long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin,
-                                                   SourceToParse sourceToParse, Consumer onMappingUpdate) throws IOException {
+                                                   SourceToParse sourceToParse) throws IOException {
         assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
         assert versionType.validateVersionForWrites(version);
         ensureWriteAllowed(origin);
@@ -662,14 +661,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 autoGeneratedTimeStamp, isRetry);
             Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
             if (update != null) {
-                // wrap this in the outer catch block, as the master might also throw a MapperParsingException when updating the mapping
-                onMappingUpdate.accept(update);
+                return new Engine.IndexResult(update);
             }
-        } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
-            return new Engine.IndexResult(e, version, seqNo);
         } catch (Exception e) {
+            // We treat any exception during parsing and/or mapping update as a document-level
+            // failure, with the exception of the side effect of closing the shard. Since we don't
+            // fail the shard, we cannot raise an exception that may block replication of previous
+            // operations to the replicas
             verifyNotClosed(e);
-            throw e;
+            return new Engine.IndexResult(e, version, seqNo);
         }
 
         return index(getEngine(), operation);
@@ -726,21 +726,19 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return engine.noOp(noOp);
     }
 
-    public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType,
-                                                             Consumer onMappingUpdate) throws IOException {
+    public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType)
+        throws IOException {
         return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType,
-            Engine.Operation.Origin.PRIMARY, onMappingUpdate);
+            Engine.Operation.Origin.PRIMARY);
     }
 
     public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long version, String type, String id,
-                                                             VersionType versionType,
-                                                             Consumer onMappingUpdate) throws IOException {
-        return applyDeleteOperation(seqNo, primaryTerm, version, type, id, versionType, Engine.Operation.Origin.REPLICA, onMappingUpdate);
+                                                             VersionType versionType) throws IOException {
+        return applyDeleteOperation(seqNo, primaryTerm, version, type, id, versionType, Engine.Operation.Origin.REPLICA);
     }
 
     private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm, long version, String type, String id,
-                                                     VersionType versionType, Engine.Operation.Origin origin,
-                                                     Consumer onMappingUpdate) throws IOException {
+                                                     VersionType versionType, Engine.Operation.Origin origin) throws IOException {
         assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
         assert versionType.validateVersionForWrites(version);
         ensureWriteAllowed(origin);
@@ -755,7 +753,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         try {
             Mapping update = docMapper(type).getMapping();
             if (update != null) {
-                onMappingUpdate.accept(update);
+                return new Engine.DeleteResult(update);
             }
         } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
             return new Engine.DeleteResult(e, version, seqNo, false);
@@ -1197,8 +1195,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         assert currentEngineReference.get() == null;
     }
 
-    public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin,
-                                                Consumer onMappingUpdate) throws IOException {
+    public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException {
         final Engine.Result result;
         switch (operation.opType()) {
             case INDEX:
@@ -1208,13 +1205,12 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 result = applyIndexOperation(index.seqNo(), index.primaryTerm(), index.version(),
                     index.versionType().versionTypeForReplicationAndRecovery(), index.getAutoGeneratedIdTimestamp(), true, origin,
                     source(shardId.getIndexName(), index.type(), index.id(), index.source(),
-                        XContentHelper.xContentType(index.source()))
-                        .routing(index.routing()), onMappingUpdate);
+                        XContentHelper.xContentType(index.source())).routing(index.routing()));
                 break;
             case DELETE:
                 final Translog.Delete delete = (Translog.Delete) operation;
                 result = applyDeleteOperation(delete.seqNo(), delete.primaryTerm(), delete.version(), delete.type(), delete.id(),
-                    delete.versionType().versionTypeForReplicationAndRecovery(), origin, onMappingUpdate);
+                    delete.versionType().versionTypeForReplicationAndRecovery(), origin);
                 break;
             case NO_OP:
                 final Translog.NoOp noOp = (Translog.NoOp) operation;
@@ -1235,10 +1231,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         while ((operation = snapshot.next()) != null) {
             try {
                 logger.trace("[translog] recover op {}", operation);
-                Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, update -> {
-                    throw new IllegalArgumentException("unexpected mapping update: " + update);
-                });
-                ExceptionsHelper.reThrowIfNotNull(result.getFailure());
+                Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY);
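+                // Local translog recovery replays operations whose mappings were already
+                // committed to the store, so a required mapping update here indicates a bug.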
+                switch (result.getResultType()) {
+                    case FAILURE:
+                        throw result.getFailure();
+                    case MAPPING_UPDATE_REQUIRED:
+                        throw new IllegalArgumentException("unexpected mapping update: " + result.getRequiredMappingUpdate());
+                    case SUCCESS:
+                        break;
+                    default:
+                        throw new AssertionError("Unknown result type [" + result.getResultType() + "]");
+                }
+
                 opsRecovered++;
                 recoveryState.getTranslog().incrementRecoveredOperations();
             } catch (Exception e) {
@@ -1246,7 +1250,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                     // mainly for MapperParsingException and Failure to detect xcontent
                     logger.info("ignoring recovery of a corrupt translog entry", e);
                 } else {
-                    throw e;
+                    throw ExceptionsHelper.convertToRuntime(e);
                 }
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java
index ada869a1d9c..e7b7b719aed 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java
@@ -66,7 +66,7 @@ final class InternalIndexingStats implements IndexingOperationListener {
 
     @Override
     public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
-        if (!operation.origin().isRecovery()) {
+        if (operation.origin().isRecovery() == false) {
             totalStats.indexCurrent.inc();
             typeStats(operation.type()).indexCurrent.inc();
         }
@@ -75,17 +75,22 @@ final class InternalIndexingStats implements IndexingOperationListener {
 
     @Override
     public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
-        if (result.hasFailure() == false) {
-            if (!index.origin().isRecovery()) {
-                long took = result.getTook();
-                totalStats.indexMetric.inc(took);
-                totalStats.indexCurrent.dec();
-                StatsHolder typeStats = typeStats(index.type());
-                typeStats.indexMetric.inc(took);
-                typeStats.indexCurrent.dec();
-            }
-        } else {
-            postIndex(shardId, index, result.getFailure());
+        switch (result.getResultType()) {
+            case SUCCESS:
+                if (index.origin().isRecovery() == false) {
+                    long took = result.getTook();
+                    totalStats.indexMetric.inc(took);
+                    totalStats.indexCurrent.dec();
+                    StatsHolder typeStats = typeStats(index.type());
+                    typeStats.indexMetric.inc(took);
+                    typeStats.indexCurrent.dec();
+                }
+                break;
+            case FAILURE:
+                postIndex(shardId, index, result.getFailure());
+                break;
+            default:
+                throw new IllegalArgumentException("unknown result type: " + result.getResultType());
         }
     }
 
@@ -111,17 +116,22 @@ final class InternalIndexingStats implements IndexingOperationListener {
 
     @Override
     public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
-        if (result.hasFailure() == false) {
-            if (!delete.origin().isRecovery()) {
-                long took = result.getTook();
-                totalStats.deleteMetric.inc(took);
-                totalStats.deleteCurrent.dec();
-                StatsHolder typeStats = typeStats(delete.type());
-                typeStats.deleteMetric.inc(took);
-                typeStats.deleteCurrent.dec();
-            }
-        } else {
-            postDelete(shardId, delete, result.getFailure());
+        switch (result.getResultType()) {
+            case SUCCESS:
+                if (delete.origin().isRecovery() == false) {
+                    long took = result.getTook();
+                    totalStats.deleteMetric.inc(took);
+                    totalStats.deleteCurrent.dec();
+                    StatsHolder typeStats = typeStats(delete.type());
+                    typeStats.deleteMetric.inc(took);
+                    typeStats.deleteCurrent.dec();
+                }
+                break;
+            case FAILURE:
+                postDelete(shardId, delete, result.getFailure());
+                break;
+            default:
+                throw new IllegalArgumentException("unknown result type: " + result.getResultType());
         }
     }
 
diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
index d8e2ec53547..ac5a5047464 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java
@@ -34,8 +34,8 @@ import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.IndexingOperationListener;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.Scheduler.Cancellable;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.ThreadPool.Names;
 
 import java.io.Closeable;
@@ -210,7 +210,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index
 
     /** called by IndexShard to record estimated bytes written to translog for the operation */
     private void recordOperationBytes(Engine.Operation operation, Engine.Result result) {
-        if (result.hasFailure() == false) {
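+        // Only operations that completed successfully contributed bytes to the translog.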
+        if (result.getResultType() == Engine.Result.Type.SUCCESS) {
             statusChecker.bytesWritten(operation.estimatedSizeInBytes());
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index 244bb462df6..57deb4666da 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -394,10 +394,11 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
             throw new IndexShardNotRecoveringException(shardId, indexShard().state());
         }
         for (Translog.Operation operation : operations) {
-            Engine.Result result = indexShard().applyTranslogOperation(operation, Engine.Operation.Origin.PEER_RECOVERY, update -> {
+            Engine.Result result = indexShard().applyTranslogOperation(operation, Engine.Operation.Origin.PEER_RECOVERY);
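+            // Mapping updates are applied on the primary before an operation is
+            // replicated, so a recovery target must never be asked for one.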
+            if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
                 throw new MapperException("mapping updates are not allowed [" + operation + "]");
-            });
-            assert result.hasFailure() == false : "unexpected failure while replicating translog entry: " + result.getFailure();
+            }
+            assert result.getFailure() == null : "unexpected failure while replicating translog entry: " + result.getFailure();
             ExceptionsHelper.reThrowIfNotNull(result.getFailure());
         }
         // update stats only after all operations completed (to ensure that mapping updates don't mess with stats)
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
index dd76564ca32..f4a72dccdcc 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
@@ -82,7 +82,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
 
     private IndexMetaData indexMetaData() throws IOException {
         return IndexMetaData.builder("index")
-                .putMapping("type",
+                .putMapping("_doc",
                         "{\"properties\":{\"foo\":{\"type\":\"text\",\"fields\":" +
                                 "{\"keyword\":{\"type\":\"keyword\",\"ignore_above\":256}}}}}")
                 .settings(idxSettings)
@@ -91,7 +91,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
 
     public void testShouldExecuteReplicaItem() throws Exception {
         // Successful index request should be replicated
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
         DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean());
         BulkItemRequest request = new BulkItemRequest(0, writeRequest);
@@ -100,7 +100,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
                 equalTo(ReplicaItemExecutionMode.NORMAL));
 
         // Failed index requests without sequence no should not be replicated
-        writeRequest = new IndexRequest("index", "type", "id")
+        writeRequest = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
         request = new BulkItemRequest(0, writeRequest);
         request.setPrimaryResponse(
@@ -137,9 +137,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
 
         BulkItemRequest[] items = new BulkItemRequest[1];
         boolean create = randomBoolean();
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
-                .create(create);
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(create);
         BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest);
         items[0] = primaryRequest;
         BulkShardRequest bulkShardRequest =
@@ -166,9 +164,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         // Assert that the document actually made it there
         assertDocCount(shard, 1);
 
-        writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
-                .create(true);
+        writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(true);
         primaryRequest = new BulkItemRequest(0, writeRequest);
         items[0] = primaryRequest;
         bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
@@ -176,7 +172,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         Translog.Location secondLocation =
                 TransportShardBulkAction.executeBulkItemRequest( metaData,
                         shard, bulkShardRequest, newLocation, 0, updateHelper,
-                        threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
+                        threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(new RuntimeException("fail")));
 
         // Translog should not change, since the document was not indexed due to a version conflict
         assertThat(secondLocation, equalTo(newLocation));
@@ -193,7 +189,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
 
         BulkItemResponse.Failure failure = primaryResponse.getFailure();
         assertThat(failure.getIndex(), equalTo("index"));
-        assertThat(failure.getType(), equalTo("type"));
+        assertThat(failure.getType(), equalTo("_doc"));
         assertThat(failure.getId(), equalTo("id"));
         assertThat(failure.getCause().getClass(), equalTo(VersionConflictEngineException.class));
         assertThat(failure.getCause().getMessage(),
@@ -212,8 +208,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
 
         BulkItemRequest[] items = new BulkItemRequest[randomIntBetween(2, 5)];
         for (int i = 0; i < items.length; i++) {
-            DocWriteRequest writeRequest = new IndexRequest("index", "type", "id_" + i)
-                .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar-" + i)
+            DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id_" + i)
+                .source(Requests.INDEX_CONTENT_TYPE)
                 .opType(DocWriteRequest.OpType.INDEX);
             items[i] = new BulkItemRequest(i, writeRequest);
         }
@@ -240,7 +236,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
             BulkItemResponse response = result.finalResponseIfSuccessful.getResponses()[i];
             assertThat(response.getItemId(), equalTo(i));
             assertThat(response.getIndex(), equalTo("index"));
-            assertThat(response.getType(), equalTo("type"));
+            assertThat(response.getType(), equalTo("_doc"));
             assertThat(response.getId(), equalTo("id_" + i));
             assertThat(response.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
             if (response.getItemId() == rejectItem.id()) {
@@ -262,7 +258,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         IndexShard shard = newStartedShard(true);
 
         BulkItemRequest[] items = new BulkItemRequest[1];
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
         items[0] = new BulkItemRequest(0, writeRequest);
         BulkShardRequest bulkShardRequest =
@@ -272,26 +268,20 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         UpdateHelper updateHelper = null;
 
         // Pretend the mappings haven't made it to the node yet, and throw a rejection
-        RuntimeException err = new ReplicationOperation.RetryOnPrimaryException(shardId, "rejection");
-
-        try {
-            TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
-                    location, 0, updateHelper, threadPool::absoluteTimeInMillis,
-                    new ThrowingVerifyingMappingUpdatePerformer(err));
-            fail("should have thrown a retry exception");
-        } catch (ReplicationOperation.RetryOnPrimaryException e) {
-            assertThat(e, equalTo(err));
-        }
+        expectThrows(ReplicationOperation.RetryOnPrimaryException.class,
+            () -> TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
+                location, 0, updateHelper, threadPool::absoluteTimeInMillis,
+                new NoopMappingUpdatePerformer()));
 
         closeShards(shard);
     }
 
-    public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exception {
+    public void testExecuteBulkIndexRequestWithErrorWhileUpdatingMapping() throws Exception {
         IndexMetaData metaData = indexMetaData();
         IndexShard shard = newStartedShard(true);
 
         BulkItemRequest[] items = new BulkItemRequest[1];
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
         items[0] = new BulkItemRequest(0, writeRequest);
         BulkShardRequest bulkShardRequest =
@@ -300,8 +290,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         Translog.Location location = new Translog.Location(0, 0, 0);
         UpdateHelper updateHelper = null;
 
-        // Return a mapping conflict (IAE) when trying to update the mapping
-        RuntimeException err = new IllegalArgumentException("mapping conflict");
+        // Return an exception when trying to update the mapping
+        RuntimeException err = new RuntimeException("some kind of exception");
 
         Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData,
                 shard, bulkShardRequest, location, 0, updateHelper,
@@ -318,13 +308,12 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertThat(primaryResponse.getId(), equalTo("id"));
         assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
         assertTrue(primaryResponse.isFailed());
-        assertThat(primaryResponse.getFailureMessage(), containsString("mapping conflict"));
+        assertThat(primaryResponse.getFailureMessage(), containsString("some kind of exception"));
         BulkItemResponse.Failure failure = primaryResponse.getFailure();
         assertThat(failure.getIndex(), equalTo("index"));
-        assertThat(failure.getType(), equalTo("type"));
+        assertThat(failure.getType(), equalTo("_doc"));
         assertThat(failure.getId(), equalTo("id"));
         assertThat(failure.getCause(), equalTo(err));
-        assertThat(failure.getStatus(), equalTo(RestStatus.BAD_REQUEST));
 
         closeShards(shard);
     }
@@ -334,7 +323,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         IndexShard shard = newStartedShard(true);
 
         BulkItemRequest[] items = new BulkItemRequest[1];
-        DocWriteRequest writeRequest = new DeleteRequest("index", "type", "id");
+        DocWriteRequest writeRequest = new DeleteRequest("index", "_doc", "id");
         items[0] = new BulkItemRequest(0, writeRequest);
         BulkShardRequest bulkShardRequest =
                 new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
@@ -366,16 +355,16 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOT_FOUND));
         assertThat(response.getShardId(), equalTo(shard.shardId()));
         assertThat(response.getIndex(), equalTo("index"));
-        assertThat(response.getType(), equalTo("type"));
+        assertThat(response.getType(), equalTo("_doc"));
         assertThat(response.getId(), equalTo("id"));
         assertThat(response.getVersion(), equalTo(1L));
         assertThat(response.getSeqNo(), equalTo(0L));
         assertThat(response.forcedRefresh(), equalTo(false));
 
         // Now do the same after indexing the document, it should now find and delete the document
-        indexDoc(shard, "type", "id", "{\"foo\": \"bar\"}");
+        indexDoc(shard, "_doc", "id", "{}");
 
-        writeRequest = new DeleteRequest("index", "type", "id");
+        writeRequest = new DeleteRequest("index", "_doc", "id");
         items[0] = new BulkItemRequest(0, writeRequest);
         bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
 
@@ -405,7 +394,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertThat(response.getResult(), equalTo(DocWriteResponse.Result.DELETED));
         assertThat(response.getShardId(), equalTo(shard.shardId()));
         assertThat(response.getIndex(), equalTo("index"));
-        assertThat(response.getType(), equalTo("type"));
+        assertThat(response.getType(), equalTo("_doc"));
         assertThat(response.getId(), equalTo("id"));
         assertThat(response.getVersion(), equalTo(3L));
         assertThat(response.getSeqNo(), equalTo(2L));
@@ -416,11 +405,11 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
     }
 
     public void testNoopUpdateReplicaRequest() throws Exception {
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "field", "value");
         BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
 
-        DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "index", "id", 0,
+        DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "_doc", "id", 0,
                 DocWriteResponse.Result.NOOP);
         BulkItemResultHolder noopResults = new BulkItemResultHolder(noopUpdateResponse, null,
                 replicaRequest);
@@ -447,8 +436,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
     }
 
     public void testUpdateReplicaRequestWithFailure() throws Exception {
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "field", "value");
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE);
         BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
 
         Exception err = new ElasticsearchException("I'm dead <(x.x)>");
@@ -477,18 +465,17 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertThat(primaryResponse.getFailureMessage(), containsString("I'm dead <(x.x)>"));
         BulkItemResponse.Failure failure = primaryResponse.getFailure();
         assertThat(failure.getIndex(), equalTo("index"));
-        assertThat(failure.getType(), equalTo("type"));
+        assertThat(failure.getType(), equalTo("_doc"));
         assertThat(failure.getId(), equalTo("id"));
         assertThat(failure.getCause(), equalTo(err));
         assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
     }
 
     public void testUpdateReplicaRequestWithConflictFailure() throws Exception {
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "field", "value");
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE);
         BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
 
-        Exception err = new VersionConflictEngineException(shardId, "type", "id",
+        Exception err = new VersionConflictEngineException(shardId, "_doc", "id",
                 "I'm conflicted <(;_;)>");
         Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
         BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult,
@@ -515,21 +502,21 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertThat(primaryResponse.getFailureMessage(), containsString("I'm conflicted <(;_;)>"));
         BulkItemResponse.Failure failure = primaryResponse.getFailure();
         assertThat(failure.getIndex(), equalTo("index"));
-        assertThat(failure.getType(), equalTo("type"));
+        assertThat(failure.getType(), equalTo("_doc"));
         assertThat(failure.getId(), equalTo("id"));
         assertThat(failure.getCause(), equalTo(err));
         assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT));
     }
 
     public void testUpdateReplicaRequestWithSuccess() throws Exception {
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "field", "value");
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
+                .source(Requests.INDEX_CONTENT_TYPE);
         BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
 
         boolean created = randomBoolean();
         Translog.Location resultLocation = new Translog.Location(42, 42, 42);
         Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation);
-        DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created);
+        DocWriteResponse indexResponse = new IndexResponse(shardId, "_doc", "id", 1, 17, 1, created);
         BulkItemResultHolder goodResults =
                 new BulkItemResultHolder(indexResponse, indexResult, replicaRequest);
 
@@ -558,8 +545,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
     public void testCalculateTranslogLocation() throws Exception {
         final Translog.Location original = new Translog.Location(0, 0, 0);
 
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "field", "value");
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
+            .source(Requests.INDEX_CONTENT_TYPE);
         BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
         BulkItemResultHolder results = new BulkItemResultHolder(null, null, replicaRequest);
 
@@ -567,7 +554,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
                 equalTo(original));
 
         boolean created = randomBoolean();
-        DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created);
+        DocWriteResponse indexResponse = new IndexResponse(shardId, "_doc", "id", 1, 17, 1, created);
         Translog.Location newLocation = new Translog.Location(1, 1, 1);
         final long version = randomNonNegativeLong();
         final long seqNo = randomNonNegativeLong();
@@ -580,10 +567,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
 
     public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception {
         final IndexShard shard = spy(newStartedShard(false));
-        BulkItemRequest itemRequest = new BulkItemRequest(0,
-                new IndexRequest("index", "type")
-                        .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
-        );
+        BulkItemRequest itemRequest = new BulkItemRequest(0, new IndexRequest("index", "_doc").source(Requests.INDEX_CONTENT_TYPE));
         final String failureMessage = "simulated primary failure";
         final IOException exception = new IOException(failureMessage);
         itemRequest.setPrimaryResponse(new BulkItemResponse(0,
@@ -592,7 +576,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
                         DocWriteRequest.OpType.DELETE,
                         DocWriteRequest.OpType.INDEX
                 ),
-                new BulkItemResponse.Failure("index", "type", "1",
+                new BulkItemResponse.Failure("index", "_doc", "1",
                     exception, 1L)
         ));
         BulkItemRequest[] itemRequests = new BulkItemRequest[1];
@@ -609,33 +593,23 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         logger.info("--> metadata.getIndex(): {}", metaData.getIndex());
         final IndexShard shard = spy(newStartedShard(true));
 
-        IndexRequest request = new IndexRequest("index", "type", "id")
+        IndexRequest request = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
 
         final AtomicInteger updateCalled = new AtomicInteger(0);
-        final AtomicInteger verifyCalled = new AtomicInteger(0);
-        TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard,
-                new MappingUpdatePerformer() {
-                    @Override
-                    public void updateMappings(Mapping update, ShardId shardId, String type) {
-                        // There should indeed be a mapping update
-                        assertNotNull(update);
-                        updateCalled.incrementAndGet();
-                    }
-
-                    @Override
-                    public void verifyMappings(Mapping update, ShardId shardId) {
-                        // No-op, will be called
-                        logger.info("--> verifying mappings noop");
-                        verifyCalled.incrementAndGet();
-                    }
-        });
+        expectThrows(ReplicationOperation.RetryOnPrimaryException.class,
+            () -> TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard,
+                (update, shardId, type) -> {
+                    // There should indeed be a mapping update
+                    assertNotNull(update);
+                    updateCalled.incrementAndGet();
+                }));
 
         assertThat("mappings were \"updated\" once", updateCalled.get(), equalTo(1));
-        assertThat("mappings were \"verified\" once", verifyCalled.get(), equalTo(1));
 
         // Verify that the shard "executed" the operation twice
-        verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean(), any());
+        verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean());
 
         // Update the mapping, so the next mapping updater doesn't do anything
         final MapperService mapperService = shard.mapperService();
@@ -643,21 +617,11 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         mapperService.updateMapping(metaData);
 
         TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard,
-                new MappingUpdatePerformer() {
-                    @Override
-                    public void updateMappings(Mapping update, ShardId shardId, String type) {
-                        fail("should not have had to update the mappings");
-                    }
-
-                    @Override
-                    public void verifyMappings(Mapping update, ShardId shardId) {
-                        fail("should not have had to update the mappings");
-                    }
-        });
+            (update, shardId, type) -> fail("should not have had to update the mappings"));
 
         // Verify that the shard "executed" the operation only once (2 for previous invocations plus
         // 1 for this execution)
-        verify(shard, times(3)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean(), any());
+        verify(shard, times(3)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean());
 
         closeShards(shard);
     }
@@ -678,16 +642,16 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
     public void testProcessUpdateResponse() throws Exception {
         IndexShard shard = newStartedShard(false);
 
-        UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
+        UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id");
         BulkItemRequest request = new BulkItemRequest(0, updateRequest);
-        Exception err = new VersionConflictEngineException(shardId, "type", "id",
+        Exception err = new VersionConflictEngineException(shardId, "_doc", "id",
                 "I'm conflicted <(;_;)>");
         Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
         Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, true);
         DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED;
         DocWriteResponse.Result deleteWriteResult = DocWriteResponse.Result.DELETED;
-        IndexRequest indexRequest = new IndexRequest("index", "type", "id");
-        DeleteRequest deleteRequest = new DeleteRequest("index", "type", "id");
+        IndexRequest indexRequest = new IndexRequest("index", "_doc", "id");
+        DeleteRequest deleteRequest = new DeleteRequest("index", "_doc", "id");
         UpdateHelper.Result translate = new UpdateHelper.Result(indexRequest, docWriteResult,
                 new HashMap<String, Object>(), XContentType.JSON);
         UpdateHelper.Result translateDelete = new UpdateHelper.Result(deleteRequest, deleteWriteResult,
@@ -733,30 +697,28 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         IndexShard shard = newStartedShard(true);
 
         Map<String, Object> source = new HashMap<>();
-        source.put("foo", "bar");
         BulkItemRequest[] items = new BulkItemRequest[1];
         boolean create = randomBoolean();
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
-                .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
-                .create(create);
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(create);
         BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest);
         items[0] = primaryRequest;
         BulkShardRequest bulkShardRequest =
                 new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
 
         Translog.Location location = new Translog.Location(0, 0, 0);
-        IndexRequest indexRequest = new IndexRequest("index", "type", "id");
+        IndexRequest indexRequest = new IndexRequest("index", "_doc", "id");
         indexRequest.source(source);
 
         DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED;
         UpdateHelper.Result translate = new UpdateHelper.Result(indexRequest, docWriteResult,
                 new HashMap<String, Object>(), XContentType.JSON);
         UpdateHelper updateHelper = new MockUpdateHelper(translate);
-        UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
+        UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id");
         updateRequest.upsert(source);
 
         BulkItemResultHolder holder = TransportShardBulkAction.executeUpdateRequestOnce(updateRequest, shard, metaData,
-                "index", updateHelper, threadPool::absoluteTimeInMillis, primaryRequest, 0, new NoopMappingUpdatePerformer());
+                "index", updateHelper, threadPool::absoluteTimeInMillis, primaryRequest, 0,
+                new ThrowingMappingUpdatePerformer(new RuntimeException()));
 
         assertFalse(holder.isVersionConflict());
         assertNotNull(holder.response);
@@ -785,7 +747,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         source.put("foo", "bar");
         BulkItemRequest[] items = new BulkItemRequest[1];
         boolean create = randomBoolean();
-        DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
+        DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id")
                 .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
                 .create(create);
         BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest);
@@ -794,13 +756,13 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
                 new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
 
         Translog.Location location = new Translog.Location(0, 0, 0);
-        IndexRequest indexRequest = new IndexRequest("index", "type", "id");
+        IndexRequest indexRequest = new IndexRequest("index", "_doc", "id");
         indexRequest.source(source);
 
         DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED;
         Exception prepareFailure = new IllegalArgumentException("I failed to do something!");
         UpdateHelper updateHelper = new FailingUpdateHelper(prepareFailure);
-        UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
+        UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id");
         updateRequest.upsert(source);
 
         BulkItemResultHolder holder = TransportShardBulkAction.executeUpdateRequestOnce(updateRequest, shard, metaData,
@@ -812,7 +774,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertNotNull(holder.replicaRequest);
 
         Engine.IndexResult opResult = (Engine.IndexResult) holder.operationResult;
-        assertTrue(opResult.hasFailure());
+        assertThat(opResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
         assertFalse(opResult.isCreated());
         Exception e = opResult.getFailure();
         assertThat(e.getMessage(), containsString("I failed to do something!"));
@@ -822,7 +784,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         assertThat(replicaBulkRequest.request(), instanceOf(IndexRequest.class));
         IndexRequest replicaRequest = (IndexRequest) replicaBulkRequest.request();
         assertThat(replicaRequest.index(), equalTo("index"));
-        assertThat(replicaRequest.type(), equalTo("type"));
+        assertThat(replicaRequest.type(), equalTo("_doc"));
         assertThat(replicaRequest.id(), equalTo("id"));
         assertThat(replicaRequest.sourceAsMap(), equalTo(source));
 
@@ -889,9 +851,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
     public static class NoopMappingUpdatePerformer implements MappingUpdatePerformer {
         public void updateMappings(Mapping update, ShardId shardId, String type) {
         }
-
-        public void verifyMappings(Mapping update, ShardId shardId) {
-        }
     }
 
     /** Always throw the given exception */
@@ -904,24 +863,5 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         public void updateMappings(Mapping update, ShardId shardId, String type) {
             throw e;
         }
-
-        public void verifyMappings(Mapping update, ShardId shardId) {
-            fail("should not have gotten to this point");
-        }
-    }
-
-    /** Always throw the given exception */
-    private class ThrowingVerifyingMappingUpdatePerformer implements MappingUpdatePerformer {
-        private final RuntimeException e;
-        ThrowingVerifyingMappingUpdatePerformer(RuntimeException e) {
-            this.e = e;
-        }
-
-        public void updateMappings(Mapping update, ShardId shardId, String type) {
-        }
-
-        public void verifyMappings(Mapping update, ShardId shardId) {
-            throw e;
-        }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java
index 43e3b2ef01b..f7716c6f146 100644
--- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java
@@ -20,6 +20,10 @@
 package org.elasticsearch.discovery;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -30,14 +34,17 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.discovery.zen.ElectMasterService;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.monitor.jvm.HotThreads;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster;
 import org.elasticsearch.test.disruption.IntermittentLongGCDisruption;
 import org.elasticsearch.test.disruption.LongGCDisruption;
 import org.elasticsearch.test.disruption.NetworkDisruption;
 import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
+import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
 import org.elasticsearch.test.disruption.SingleNodeDisruption;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 
@@ -449,6 +456,56 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {
 
     }
 
+    @TestLogging(
+        "_root:DEBUG,"
+            + "org.elasticsearch.action.bulk:TRACE,"
+            + "org.elasticsearch.action.get:TRACE,"
+            + "org.elasticsearch.cluster.service:TRACE,"
+            + "org.elasticsearch.discovery:TRACE,"
+            + "org.elasticsearch.indices.cluster:TRACE,"
+            + "org.elasticsearch.indices.recovery:TRACE,"
+            + "org.elasticsearch.index.seqno:TRACE,"
+            + "org.elasticsearch.index.shard:TRACE")
+    public void testMappingTimeout() throws Exception {
+        startCluster(3);
+        createIndex("test", Settings.builder()
+            .put("index.number_of_shards", 1)
+            .put("index.number_of_replicas", 1)
+            .put("index.routing.allocation.exclude._name", internalCluster().getMasterName())
+        .build());
+
+        // create one field
+        index("test", "doc", "1", "{ \"f\": 1 }");
+
+        ensureGreen();
+
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+            Settings.builder().put("indices.mapping.dynamic_timeout", "1ms")));
+
+        ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random());
+        setDisruptionScheme(disruption);
+
+        disruption.startDisrupting();
+
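+        // With the master blocked, the dynamic mapping update needed for the new
+        // field "g" cannot complete within the 1ms timeout, so the bulk reports failures.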
+        BulkRequestBuilder bulk = client().prepareBulk();
+        bulk.add(client().prepareIndex("test", "doc", "2").setSource("{ \"f\": 1 }", XContentType.JSON));
+        bulk.add(client().prepareIndex("test", "doc", "3").setSource("{ \"g\": 1 }", XContentType.JSON));
+        bulk.add(client().prepareIndex("test", "doc", "4").setSource("{ \"f\": 1 }", XContentType.JSON));
+        BulkResponse bulkResponse = bulk.get();
+        assertTrue(bulkResponse.hasFailures());
+
+        disruption.stopDisrupting();
+
+        assertBusy(() -> {
+            IndicesStatsResponse stats = client().admin().indices().prepareStats("test").clear().get();
+            for (ShardStats shardStats : stats.getShards()) {
+                assertThat(shardStats.getShardRouting().toString(),
+                    shardStats.getSeqNoStats().getGlobalCheckpoint(), equalTo(shardStats.getSeqNoStats().getLocalCheckpoint()));
+            }
+        });
+    }
+
     void assertDiscoveryCompleted(List<String> nodes) throws InterruptedException {
         for (final String node : nodes) {
             assertTrue(
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index e769485443a..e399c02d6cc 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -1380,7 +1380,7 @@ public class InternalEngineTests extends EngineTestCase {
 
         create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
         indexResult = engine.index(create);
-        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
         assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
@@ -1478,7 +1478,7 @@ public class InternalEngineTests extends EngineTestCase {
                 // intentional
                 assertThat(result.isCreated(), equalTo(firstOp));
                 assertThat(result.getVersion(), equalTo(op.version()));
-                assertThat(result.hasFailure(), equalTo(false));
+                assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
 
             } else {
                 Engine.DeleteResult result = replicaEngine.delete((Engine.Delete) op);
@@ -1489,7 +1489,7 @@ public class InternalEngineTests extends EngineTestCase {
                 // intentional
                 assertThat(result.isFound(), equalTo(firstOp == false));
                 assertThat(result.getVersion(), equalTo(op.version()));
-                assertThat(result.hasFailure(), equalTo(false));
+                assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
             }
             if (randomBoolean()) {
                 engine.refresh("test");
@@ -1621,13 +1621,13 @@ public class InternalEngineTests extends EngineTestCase {
                     Engine.IndexResult result = engine.index(indexWithVersion.apply(conflictingVersion, index));
                     assertThat(result.isCreated(), equalTo(false));
                     assertThat(result.getVersion(), equalTo(lastOpVersion));
-                    assertThat(result.hasFailure(), equalTo(true));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE));
                     assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
                 } else {
                     Engine.IndexResult result = engine.index(versionedOp ? indexWithVersion.apply(correctVersion, index) : index);
                     assertThat(result.isCreated(), equalTo(docDeleted));
                     assertThat(result.getVersion(), equalTo(Math.max(lastOpVersion + 1, 1)));
-                    assertThat(result.hasFailure(), equalTo(false));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
                     assertThat(result.getFailure(), nullValue());
                     lastFieldValue = index.docs().get(0).get("value");
                     docDeleted = false;
@@ -1641,13 +1641,13 @@ public class InternalEngineTests extends EngineTestCase {
                     Engine.DeleteResult result = engine.delete(delWithVersion.apply(conflictingVersion, delete));
                     assertThat(result.isFound(), equalTo(docDeleted == false));
                     assertThat(result.getVersion(), equalTo(lastOpVersion));
-                    assertThat(result.hasFailure(), equalTo(true));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE));
                     assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
                 } else {
                     Engine.DeleteResult result = engine.delete(versionedOp ? delWithVersion.apply(correctVersion, delete) : delete);
                     assertThat(result.isFound(), equalTo(docDeleted == false));
                     assertThat(result.getVersion(), equalTo(Math.max(lastOpVersion + 1, 1)));
-                    assertThat(result.hasFailure(), equalTo(false));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
                     assertThat(result.getFailure(), nullValue());
                     docDeleted = true;
                     lastOpVersion = result.getVersion();
@@ -1725,14 +1725,14 @@ public class InternalEngineTests extends EngineTestCase {
                     assertThat(result.getSeqNo(), equalTo(seqNo));
                     assertThat(result.isCreated(), equalTo(docDeleted));
                     assertThat(result.getVersion(), equalTo(op.version()));
-                    assertThat(result.hasFailure(), equalTo(false));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
                     assertThat(result.getFailure(), nullValue());
                     docDeleted = false;
                     highestOpVersion = op.version();
                 } else {
                     assertThat(result.isCreated(), equalTo(false));
                     assertThat(result.getVersion(), equalTo(highestOpVersion));
-                    assertThat(result.hasFailure(), equalTo(true));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE));
                     assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
                 }
             } else {
@@ -1743,14 +1743,14 @@ public class InternalEngineTests extends EngineTestCase {
                     assertThat(result.getSeqNo(), equalTo(seqNo));
                     assertThat(result.isFound(), equalTo(docDeleted == false));
                     assertThat(result.getVersion(), equalTo(op.version()));
-                    assertThat(result.hasFailure(), equalTo(false));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
                     assertThat(result.getFailure(), nullValue());
                     docDeleted = true;
                     highestOpVersion = op.version();
                 } else {
                     assertThat(result.isFound(), equalTo(docDeleted == false));
                     assertThat(result.getVersion(), equalTo(highestOpVersion));
-                    assertThat(result.hasFailure(), equalTo(true));
+                    assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE));
                     assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
                 }
             }
@@ -1863,7 +1863,7 @@ public class InternalEngineTests extends EngineTestCase {
                             get.version(), VersionType.INTERNAL,
                             PRIMARY, System.currentTimeMillis(), -1, false);
                         Engine.IndexResult indexResult = engine.index(index);
-                        if (indexResult.hasFailure() == false) {
+                        if (indexResult.getResultType() == Engine.Result.Type.SUCCESS) {
                             history.add(new OpAndVersion(indexResult.getVersion(), removed, added));
                         }
 
@@ -2005,7 +2005,7 @@ public class InternalEngineTests extends EngineTestCase {
                         "test", id, newUid(id), SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
                         rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0);
                     final Engine.DeleteResult result = initialEngine.delete(delete);
-                    if (!result.hasFailure()) {
+                    if (result.getResultType() == Engine.Result.Type.SUCCESS) {
                         assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1));
                         assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo + 1));
                         indexedIds.remove(id);
@@ -2023,7 +2023,7 @@ public class InternalEngineTests extends EngineTestCase {
                         rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL,
                         PRIMARY, 0, -1, false);
                     final Engine.IndexResult result = initialEngine.index(index);
-                    if (!result.hasFailure()) {
+                    if (result.getResultType() == Engine.Result.Type.SUCCESS) {
                         assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1));
                         assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo + 1));
                         indexedIds.add(id);
@@ -2297,7 +2297,7 @@ public class InternalEngineTests extends EngineTestCase {
             // Try to index uid=1 with a too-old version, should fail:
             Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
             Engine.IndexResult indexResult = engine.index(index);
-            assertTrue(indexResult.hasFailure());
+            assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
             assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
             // Get should still not find the document
@@ -2307,7 +2307,7 @@ public class InternalEngineTests extends EngineTestCase {
             // Try to index uid=2 with a too-old version, should fail:
             Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
             indexResult = engine.index(index1);
-            assertTrue(indexResult.hasFailure());
+            assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
             assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
             // Get should not find the document
@@ -3186,7 +3186,7 @@ public class InternalEngineTests extends EngineTestCase {
 
         index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
         indexResult = replicaEngine.index(index);
-        assertThat(indexResult.hasFailure(), equalTo(false));
+        assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
         replicaEngine.refresh("test");
         try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
             TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
index 459fcb1d377..bd1d25eac57 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
@@ -236,7 +236,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
 
         IndexShard shard = indexService.getShard(0);
         shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
-            sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {});
+            sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
         shard.refresh("test");
         try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
             LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
@@ -278,7 +278,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
 
         IndexShard shard = indexService.getShard(0);
         shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
-            sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {});
+            sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
         shard.refresh("test");
         try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
             LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
index 2d2aaac7bbd..736dc40e686 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
@@ -251,7 +251,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
             // test only primary
             shards.startPrimary();
             BulkItemResponse response = shards.index(
-                    new IndexRequest(index.getName(), "testDocumentFailureReplication", "1")
+                    new IndexRequest(index.getName(), "type", "1")
                             .source("{}", XContentType.JSON)
             );
             assertTrue(response.isFailed());
@@ -265,7 +265,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
             }
             shards.startReplicas(nReplica);
             response = shards.index(
-                    new IndexRequest(index.getName(), "testDocumentFailureReplication", "1")
+                    new IndexRequest(index.getName(), "type", "1")
                             .source("{}", XContentType.JSON)
             );
             assertTrue(response.isFailed());
@@ -281,7 +281,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startAll();
             BulkItemResponse response = shards.index(
-                    new IndexRequest(index.getName(), "testRequestFailureException", "1")
+                    new IndexRequest(index.getName(), "type", "1")
                             .source("{}", XContentType.JSON)
                             .version(2)
             );
@@ -300,7 +300,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
             }
             shards.startReplicas(nReplica);
             response = shards.index(
-                    new IndexRequest(index.getName(), "testRequestFailureException", "1")
+                    new IndexRequest(index.getName(), "type", "1")
                             .source("{}", XContentType.JSON)
                             .version(2)
             );
diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index 323b0364dfb..a34963a4751 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -22,7 +22,6 @@ package org.elasticsearch.index.replication;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexableField;
-import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.index.IndexRequest;
@@ -33,6 +32,7 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
@@ -183,8 +183,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
                     VersionType.EXTERNAL,
                     randomNonNegativeLong(),
                     false,
-                    SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON),
-                    mapping -> {});
+                    SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON));
             shards.promoteReplicaToPrimary(promotedReplica).get();
             oldPrimary.close("demoted", randomBoolean());
             oldPrimary.store().close();
@@ -199,9 +198,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
                         VersionType.INTERNAL,
                         SourceToParse.source("index", "type", "primary", new BytesArray("{}"), XContentType.JSON),
                         randomNonNegativeLong(),
-                        false,
-                        mapping -> {
-                        });
+                        false);
             }
             final IndexShard recoveredReplica =
                     shards.addReplicaWithExistingPath(remainingReplica.shardPath(), remainingReplica.routingEntry().currentNodeId());
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
index bc34aa60c49..d6d50b24d1f 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -99,6 +99,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
@@ -107,7 +108,6 @@ import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
-import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;
 
 public class IndexShardIT extends ESSingleNodeTestCase {
 
@@ -342,7 +342,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         assertFalse(shard.shouldPeriodicallyFlush());
         shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
             SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON),
-            IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {});
+            IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
         assertTrue(shard.shouldPeriodicallyFlush());
         final Translog translog = getTranslog(shard);
         assertEquals(2, translog.stats().getUncommittedOperations());
@@ -379,7 +379,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
                         .put("index.number_of_shards", 1)
                         .put("index.translog.generation_threshold_size", generationThreshold + "b")
                         .build();
-        createIndex("test", settings);
+        createIndex("test", settings, "test");
         ensureGreen("test");
         final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         final IndexService test = indicesService.indexService(resolveIndex("test"));
@@ -392,7 +392,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
             assertThat(translog.currentFileGeneration(), equalTo(generation + rolls));
             final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
                 SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON),
-                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {});
+                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
             final Translog.Location location = result.getTranslogLocation();
             shard.afterWriteOperation();
             if (location.translogLocation + location.size > generationThreshold) {
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index e945bc12705..31e51ed43d4 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -84,7 +84,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.seqno.SequenceNumbers;
@@ -1063,7 +1062,7 @@ public class IndexShardTests extends IndexShardTestCase {
      */
     public void testSnapshotStore() throws IOException {
         final IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0");
+        indexDoc(shard, "_doc", "0");
         flushShard(shard);
 
         final IndexShard newShard = reinitShard(shard);
@@ -1139,9 +1138,9 @@ public class IndexShardTests extends IndexShardTestCase {
         IndexShard test = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(test);
 
-        indexDoc(test, "test", "test");
+        indexDoc(test, "_doc", "test");
         assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
-        indexDoc(test, "test", "test");
+        indexDoc(test, "_doc", "test");
         assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
         test.getEngine().flush();
         assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion());
@@ -1188,19 +1187,19 @@ public class IndexShardTests extends IndexShardTestCase {
         long initialTotalTime = shard.refreshStats().getTotalTimeInMillis();
         // check time advances
         for (int i = 1; shard.refreshStats().getTotalTimeInMillis() == initialTotalTime; i++) {
-            indexDoc(shard, "test", "test");
+            indexDoc(shard, "_doc", "test");
             assertThat(shard.refreshStats().getTotal(), equalTo(2L + i - 1));
             shard.refresh("test");
             assertThat(shard.refreshStats().getTotal(), equalTo(2L + i));
             assertThat(shard.refreshStats().getTotalTimeInMillis(), greaterThanOrEqualTo(initialTotalTime));
         }
         long refreshCount = shard.refreshStats().getTotal();
-        indexDoc(shard, "test", "test");
+        indexDoc(shard, "_doc", "test");
         try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test",
             new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) {
             assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1));
         }
-        indexDoc(shard, "test", "test");
+        indexDoc(shard, "_doc", "test");
         shard.writeIndexingBuffer();
         assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+2));
         closeShards(shard);
@@ -1208,7 +1207,7 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testIndexingOperationsListeners() throws IOException {
         IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
         shard.updateLocalCheckpointForShard(shard.shardRouting.allocationId().getId(), 0);
         AtomicInteger preIndex = new AtomicInteger();
         AtomicInteger postIndexCreate = new AtomicInteger();
@@ -1227,14 +1226,19 @@ public class IndexShardTests extends IndexShardTestCase {
 
             @Override
             public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
-                if (result.hasFailure() == false) {
-                    if (result.isCreated()) {
-                        postIndexCreate.incrementAndGet();
-                    } else {
-                        postIndexUpdate.incrementAndGet();
-                    }
-                } else {
-                    postIndex(shardId, index, result.getFailure());
+                switch (result.getResultType()) {
+                    case SUCCESS:
+                        if (result.isCreated()) {
+                            postIndexCreate.incrementAndGet();
+                        } else {
+                            postIndexUpdate.incrementAndGet();
+                        }
+                        break;
+                    case FAILURE:
+                        postIndex(shardId, index, result.getFailure());
+                        break;
+                    default:
+                        fail("unexpected result type:" + result.getResultType());
                 }
             }
 
@@ -1251,10 +1255,15 @@ public class IndexShardTests extends IndexShardTestCase {
 
             @Override
             public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
-                if (result.hasFailure() == false) {
-                    postDelete.incrementAndGet();
-                } else {
-                    postDelete(shardId, delete, result.getFailure());
+                switch (result.getResultType()) {
+                    case SUCCESS:
+                        postDelete.incrementAndGet();
+                        break;
+                    case FAILURE:
+                        postDelete(shardId, delete, result.getFailure());
+                        break;
+                    default:
+                        fail("unexpected result type:" + result.getResultType());
                 }
             }
 
@@ -1266,7 +1275,7 @@ public class IndexShardTests extends IndexShardTestCase {
         });
         recoverShardFromStore(shard);
 
-        indexDoc(shard, "test", "1");
+        indexDoc(shard, "_doc", "1");
         assertEquals(1, preIndex.get());
         assertEquals(1, postIndexCreate.get());
         assertEquals(0, postIndexUpdate.get());
@@ -1275,7 +1284,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertEquals(0, postDelete.get());
         assertEquals(0, postDeleteException.get());
 
-        indexDoc(shard, "test", "1");
+        indexDoc(shard, "_doc", "1");
         assertEquals(2, preIndex.get());
         assertEquals(1, postIndexCreate.get());
         assertEquals(1, postIndexUpdate.get());
@@ -1284,7 +1293,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertEquals(0, postDelete.get());
         assertEquals(0, postDeleteException.get());
 
-        deleteDoc(shard, "test", "1");
+        deleteDoc(shard, "_doc", "1");
 
         assertEquals(2, preIndex.get());
         assertEquals(1, postIndexCreate.get());
@@ -1298,7 +1307,7 @@ public class IndexShardTests extends IndexShardTestCase {
         shard.state = IndexShardState.STARTED; // It will generate an exception
 
         try {
-            indexDoc(shard, "test", "1");
+            indexDoc(shard, "_doc", "1");
             fail();
         } catch (AlreadyClosedException e) {
 
@@ -1312,7 +1321,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertEquals(1, postDelete.get());
         assertEquals(0, postDeleteException.get());
         try {
-            deleteDoc(shard, "test", "1");
+            deleteDoc(shard, "_doc", "1");
             fail();
         } catch (AlreadyClosedException e) {
 
@@ -1458,7 +1467,7 @@ public class IndexShardTests extends IndexShardTestCase {
         closeShards(shard);
     }
 
-    public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOException, InterruptedException {
+    public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOException {
         final IndexShard shard = newStartedShard(true);
         final ShardRouting originalRouting = shard.routingEntry();
         IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node"));
@@ -1531,19 +1540,18 @@ public class IndexShardTests extends IndexShardTestCase {
          * - If we flush and then recover from the existing store, delete #1 will be removed while index #0 is still retained and replayed.
          */
         final IndexShard shard = newStartedShard(false);
-        final Consumer<Mapping> mappingConsumer = getMappingUpdater(shard, "test");
-        shard.applyDeleteOperationOnReplica(1, 2, "test", "id", VersionType.EXTERNAL, mappingConsumer);
+        shard.applyDeleteOperationOnReplica(1, 2, "_doc", "id", VersionType.EXTERNAL);
         shard.getEngine().rollTranslogGeneration(); // isolate the delete in its own generation
         shard.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(shard.shardId().getIndexName(), "test", "id", new BytesArray("{}"), XContentType.JSON), mappingConsumer);
+            SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id", new BytesArray("{}"), XContentType.JSON));
         shard.applyIndexOperationOnReplica(3, 3, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(shard.shardId().getIndexName(), "test", "id-3", new BytesArray("{}"), XContentType.JSON), mappingConsumer);
+            SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id-3", new BytesArray("{}"), XContentType.JSON));
         // Flushing a new commit with local checkpoint=1 allows skipping the translog gen #1 in recovery.
         shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
         shard.applyIndexOperationOnReplica(2, 3, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(shard.shardId().getIndexName(), "test", "id-2", new BytesArray("{}"), XContentType.JSON), mappingConsumer);
+            SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id-2", new BytesArray("{}"), XContentType.JSON));
         shard.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(shard.shardId().getIndexName(), "test", "id-5", new BytesArray("{}"), XContentType.JSON), mappingConsumer);
+            SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id-5", new BytesArray("{}"), XContentType.JSON));
 
         final int translogOps;
         if (randomBoolean()) {
@@ -1580,7 +1588,7 @@ public class IndexShardTests extends IndexShardTestCase {
         int totalOps = randomInt(10);
         int translogOps = totalOps;
         for (int i = 0; i < totalOps; i++) {
-            indexDoc(shard, "test", Integer.toString(i));
+            indexDoc(shard, "_doc", Integer.toString(i));
         }
         if (randomBoolean()) {
             shard.updateLocalCheckpointForShard(shard.shardRouting.allocationId().getId(), totalOps - 1);
@@ -1608,7 +1616,7 @@ public class IndexShardTests extends IndexShardTestCase {
         final IndexShard primarySource = newStartedShard(true);
         int totalOps = randomInt(10);
         for (int i = 0; i < totalOps; i++) {
-            indexDoc(primarySource, "test", Integer.toString(i));
+            indexDoc(primarySource, "_doc", Integer.toString(i));
         }
         IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1));
         final IndexShard primaryTarget = newShard(primarySource.routingEntry().getTargetRelocatingShard());
@@ -1626,15 +1634,15 @@ public class IndexShardTests extends IndexShardTestCase {
     /* This test just verifies that we fill the local checkpoint up to the max seen seqID on primary recovery */
     public void testRecoverFromStoreWithNoOps() throws IOException {
         final IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0");
-        Engine.IndexResult test = indexDoc(shard, "test", "1");
+        indexDoc(shard, "_doc", "0");
+        Engine.IndexResult test = indexDoc(shard, "_doc", "1");
         // start a replica shard and index the second doc
         final IndexShard otherShard = newStartedShard(false);
         updateMappings(otherShard, shard.indexSettings().getIndexMetaData());
-        SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "test", "1",
+        SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "_doc", "1",
             new BytesArray("{}"), XContentType.JSON);
         otherShard.applyIndexOperationOnReplica(1, 1,
-            VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, update -> {});
+            VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
 
         final ShardRouting primaryShardRouting = shard.routingEntry();
         IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting,
@@ -1676,7 +1684,7 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testRecoverFromCleanStore() throws IOException {
         final IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0");
+        indexDoc(shard, "_doc", "0");
         if (randomBoolean()) {
             flushShard(shard);
         }
@@ -1699,7 +1707,7 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception {
         final IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0");
+        indexDoc(shard, "_doc", "0");
         if (randomBoolean()) {
             flushShard(shard);
         }
@@ -1739,7 +1747,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertDocCount(newShard, 0);
         // we can't issue this request through a client because of the inconsistencies we created with the cluster state
         // doing it directly instead
-        indexDoc(newShard, "test", "0");
+        indexDoc(newShard, "_doc", "0");
         newShard.refresh("test");
         assertDocCount(newShard, 1);
 
@@ -1749,21 +1757,20 @@ public class IndexShardTests extends IndexShardTestCase {
     public void testRecoverFromStoreRemoveStaleOperations() throws Exception {
         final IndexShard shard = newStartedShard(false);
         final String indexName = shard.shardId().getIndexName();
-        final Consumer<Mapping> mapping = getMappingUpdater(shard, "doc");
         // Index #0, index #1
         shard.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(indexName, "doc", "doc-0", new BytesArray("{}"), XContentType.JSON), mapping);
+            SourceToParse.source(indexName, "_doc", "doc-0", new BytesArray("{}"), XContentType.JSON));
         flushShard(shard);
         shard.updateGlobalCheckpointOnReplica(0, "test"); // stick the global checkpoint here.
         shard.applyIndexOperationOnReplica(1, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(indexName, "doc", "doc-1", new BytesArray("{}"), XContentType.JSON), mapping);
+            SourceToParse.source(indexName, "_doc", "doc-1", new BytesArray("{}"), XContentType.JSON));
         flushShard(shard);
         assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1"));
         // Simulate resync (without rollback): Noop #1, index #2
         acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1);
         shard.markSeqNoAsNoop(1, "test");
         shard.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-            SourceToParse.source(indexName, "doc", "doc-2", new BytesArray("{}"), XContentType.JSON), mapping);
+            SourceToParse.source(indexName, "_doc", "doc-2", new BytesArray("{}"), XContentType.JSON));
         flushShard(shard);
         assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1", "doc-2"));
         // Recovering from store should discard doc #1
@@ -1800,11 +1807,11 @@ public class IndexShardTests extends IndexShardTestCase {
         final IndexShard source = newStartedShard(true);
         IndexShard target = newStartedShard(true);
 
-        indexDoc(source, "test", "0");
+        indexDoc(source, "_doc", "0");
         if (randomBoolean()) {
             source.refresh("test");
         }
-        indexDoc(target, "test", "1");
+        indexDoc(target, "_doc", "1");
         target.refresh("test");
         assertDocs(target, "1");
         flushShard(source); // only flush source
@@ -1850,8 +1857,8 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testSearcherWrapperIsUsed() throws IOException {
         IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
-        indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}");
+        indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}");
         shard.refresh("test");
 
         Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))));
@@ -1915,14 +1922,14 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\", \"fielddata\": true }}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\", \"fielddata\": true }}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, wrapper);
         recoverShardFromStore(shard);
-        indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
         shard.refresh("created segment 1");
-        indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}");
+        indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}");
         shard.refresh("created segment 2");
 
         // test global ordinals are evicted
@@ -1955,9 +1962,9 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testIndexingOperationListenersIsInvokedOnRecovery() throws IOException {
         IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
-        deleteDoc(shard, "test", "0");
-        indexDoc(shard, "test", "1", "{\"foo\" : \"bar\"}");
+        indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
+        deleteDoc(shard, "_doc", "0");
+        indexDoc(shard, "_doc", "1", "{\"foo\" : \"bar\"}");
         shard.refresh("test");
 
         final AtomicInteger preIndex = new AtomicInteger();
@@ -2007,7 +2014,7 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testSearchIsReleaseIfWrapperFails() throws IOException {
         IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
         shard.refresh("test");
         IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
             @Override
@@ -2043,13 +2050,13 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
 
-        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
         IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null);
         recoverReplica(replica, primary, (shard, discoveryNode) ->
             new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> {
@@ -2071,7 +2078,7 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, randomLongBetween(1, Long.MAX_VALUE)).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
@@ -2080,11 +2087,11 @@ public class IndexShardTests extends IndexShardTestCase {
         int numCorruptEntries = 0;
         for (int i = 0; i < numTotalEntries; i++) {
             if (randomBoolean()) {
-                operations.add(new Translog.Index("test", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
+                operations.add(new Translog.Index("_doc", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
                     "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, -1));
             } else {
                 // corrupt entry
-                operations.add(new Translog.Index("test", "2", 1,  primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
+                operations.add(new Translog.Index("_doc", "2", 1,  primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
                     "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, -1));
                 numCorruptEntries++;
             }
@@ -2124,7 +2131,7 @@ public class IndexShardTests extends IndexShardTestCase {
 
     public void testShardActiveDuringInternalRecovery() throws IOException {
         IndexShard shard = newStartedShard(true);
-        indexDoc(shard, "type", "0");
+        indexDoc(shard, "_doc", "0");
         shard = reinitShard(shard);
         DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         shard.markAsRecovering("for testing", new RecoveryState(shard.routingEntry(), localNode, null));
@@ -2145,13 +2152,13 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
 
-        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
         IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null);
         DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode));
@@ -2178,13 +2185,13 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
 
-        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
         Consumer<IndexShard> assertListenerCalled = shard -> {
             AtomicBoolean called = new AtomicBoolean();
             shard.addRefreshListener(null, b -> {
@@ -2230,15 +2237,15 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("source")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
 
         IndexShard sourceShard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(sourceShard);
 
-        indexDoc(sourceShard, "test", "0", "{\"foo\" : \"bar\"}");
-        indexDoc(sourceShard, "test", "1", "{\"foo\" : \"bar\"}");
+        indexDoc(sourceShard, "_doc", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(sourceShard, "_doc", "1", "{\"foo\" : \"bar\"}");
         sourceShard.refresh("test");
 
 
@@ -2290,8 +2297,8 @@ public class IndexShardTests extends IndexShardTestCase {
             closeShards(newShard);
         }
 
-        assertThat(requestedMappingUpdates, hasKey("test"));
-        assertThat(requestedMappingUpdates.get("test").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
+        assertThat(requestedMappingUpdates, hasKey("_doc"));
+        assertThat(requestedMappingUpdates.get("_doc").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
 
         closeShards(sourceShard, targetShard);
     }
@@ -2306,7 +2313,7 @@ public class IndexShardTests extends IndexShardTestCase {
             final long numDocsToDelete = randomIntBetween((int) Math.ceil(Math.nextUp(numDocs / 10.0)), Math.toIntExact(numDocs));
             for (int i = 0; i < numDocs; i++) {
                 final String id = Integer.toString(i);
-                indexDoc(indexShard, "test", id);
+                indexDoc(indexShard, "_doc", id);
             }
             if (randomBoolean()) {
                 indexShard.refresh("test");
@@ -2328,8 +2335,8 @@ public class IndexShardTests extends IndexShardTestCase {
                 IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList()));
             for (final Integer i : ids) {
                 final String id = Integer.toString(i);
-                deleteDoc(indexShard, "test", id);
-                indexDoc(indexShard, "test", id);
+                deleteDoc(indexShard, "_doc", id);
+                indexDoc(indexShard, "_doc", id);
             }
 
             // flush the buffered deletes
@@ -2386,7 +2393,7 @@ public class IndexShardTests extends IndexShardTestCase {
                         .field("point", randomFloat())
                         .field("description", randomUnicodeOfCodepointLength(100))
                     .endObject());
-                indexDoc(indexShard, "doc", Integer.toString(i), doc);
+                indexDoc(indexShard, "_doc", Integer.toString(i), doc);
             }
 
             assertThat("Without flushing, segment sizes should be zero",
@@ -2413,7 +2420,7 @@ public class IndexShardTests extends IndexShardTestCase {
                 if (randomBoolean()) {
                     deleteDoc(indexShard, "doc", Integer.toString(i));
                 } else {
-                    indexDoc(indexShard, "doc", Integer.toString(i), "{\"foo\": \"bar\"}");
+                    indexDoc(indexShard, "_doc", Integer.toString(i), "{\"foo\": \"bar\"}");
                 }
             }
             if (randomBoolean()) {
@@ -2442,11 +2449,11 @@ public class IndexShardTests extends IndexShardTestCase {
      */
     public void testReadSnapshotConcurrently() throws IOException, InterruptedException {
         IndexShard indexShard = newStartedShard();
-        indexDoc(indexShard, "doc", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(indexShard, "_doc", "0", "{}");
         if (randomBoolean()) {
             indexShard.refresh("test");
         }
-        indexDoc(indexShard, "doc", "1", "{\"foo\" : \"bar\"}");
+        indexDoc(indexShard, "_doc", "1", "{}");
         indexShard.flush(new FlushRequest());
         closeShards(indexShard);
 
@@ -2490,7 +2497,7 @@ public class IndexShardTests extends IndexShardTestCase {
         IndexShard indexShard = newStartedShard(isPrimary);
         final long numDocs = between(10, 100);
         for (long i = 0; i < numDocs; i++) {
-            indexDoc(indexShard, "doc", Long.toString(i), "{\"foo\" : \"bar\"}");
+            indexDoc(indexShard, "_doc", Long.toString(i), "{}");
             if (randomBoolean()) {
                 indexShard.refresh("test");
             }
@@ -2577,11 +2584,10 @@ public class IndexShardTests extends IndexShardTestCase {
         for (int i = offset + 1; i < operations; i++) {
             if (!rarely() || i == operations - 1) { // last operation can't be a gap as it's not a gap anymore
                 final String id = Integer.toString(i);
-                SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "test", id,
+                SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", id,
                         new BytesArray("{}"), XContentType.JSON);
                 indexShard.applyIndexOperationOnReplica(i,
-                        1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse,
-                        getMappingUpdater(indexShard, sourceToParse.type()));
+                    1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
                 if (!gap && i == localCheckpoint + 1) {
                     localCheckpoint++;
                 }
@@ -2702,12 +2708,12 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
-        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
         assertTrue(primary.getEngine().refreshNeeded());
         assertTrue(primary.scheduledRefresh());
         assertFalse(primary.isSearchIdle());
@@ -2740,12 +2746,12 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
-        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
         assertTrue(primary.getEngine().refreshNeeded());
         assertTrue(primary.scheduledRefresh());
         IndexScopedSettings scopedSettings = primary.indexSettings().getScopedSettings();
@@ -2753,7 +2759,7 @@ public class IndexShardTests extends IndexShardTestCase {
         scopedSettings.applySettings(settings);
 
         assertFalse(primary.getEngine().refreshNeeded());
-        indexDoc(primary, "test", "1", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}");
         assertTrue(primary.getEngine().refreshNeeded());
         long lastSearchAccess = primary.getLastSearcherAccess();
         assertFalse(primary.scheduledRefresh());
@@ -2792,7 +2798,7 @@ public class IndexShardTests extends IndexShardTestCase {
         });
         latch1.await();
 
-        indexDoc(primary, "test", "2", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "2", "{\"foo\" : \"bar\"}");
         assertFalse(primary.scheduledRefresh());
         assertTrue(primary.isSearchIdle());
         primary.checkIdle(0);
@@ -2809,15 +2815,15 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
-        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
         assertTrue(primary.getEngine().refreshNeeded());
         assertTrue(primary.scheduledRefresh());
-        Engine.IndexResult doc = indexDoc(primary, "test", "1", "{\"foo\" : \"bar\"}");
+        Engine.IndexResult doc = indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}");
         CountDownLatch latch = new CountDownLatch(1);
         primary.addRefreshListener(doc.getTranslogLocation(), r -> latch.countDown());
         assertEquals(1, latch.getCount());
@@ -2829,7 +2835,7 @@ public class IndexShardTests extends IndexShardTestCase {
         settings = Settings.builder().put(settings).put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO).build();
         scopedSettings.applySettings(settings);
 
-        doc = indexDoc(primary, "test", "2", "{\"foo\" : \"bar\"}");
+        doc = indexDoc(primary, "_doc", "2", "{\"foo\" : \"bar\"}");
         CountDownLatch latch1 = new CountDownLatch(1);
         primary.addRefreshListener(doc.getTranslogLocation(), r -> latch1.countDown());
         assertEquals(1, latch1.getCount());
@@ -2845,12 +2851,12 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         recoverShardFromStore(primary);
-        indexDoc(primary, "test", "0", "{\"foo\" : \"foo\"}");
+        indexDoc(primary, "_doc", "0", "{\"foo\" : \"foo\"}");
         primary.refresh("forced refresh");
 
         SegmentsStats ss = primary.segmentStats(randomBoolean());
@@ -2858,9 +2864,9 @@ public class IndexShardTests extends IndexShardTestCase {
         assertThat(ss.getMemoryInBytes(), equalTo(breaker.getUsed()));
         final long preRefreshBytes = ss.getMemoryInBytes();
 
-        indexDoc(primary, "test", "1", "{\"foo\" : \"bar\"}");
-        indexDoc(primary, "test", "2", "{\"foo\" : \"baz\"}");
-        indexDoc(primary, "test", "3", "{\"foo\" : \"eggplant\"}");
+        indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}");
+        indexDoc(primary, "_doc", "2", "{\"foo\" : \"baz\"}");
+        indexDoc(primary, "_doc", "3", "{\"foo\" : \"eggplant\"}");
 
         ss = primary.segmentStats(randomBoolean());
         breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
@@ -2873,7 +2879,7 @@ public class IndexShardTests extends IndexShardTestCase {
         assertThat(breaker.getUsed(), equalTo(ss.getMemoryInBytes()));
         assertThat(breaker.getUsed(), greaterThan(preRefreshBytes));
 
-        indexDoc(primary, "test", "4", "{\"foo\": \"potato\"}");
+        indexDoc(primary, "_doc", "4", "{\"foo\": \"potato\"}");
         // Forces a refresh with the INTERNAL scope
         ((InternalEngine) primary.getEngine()).writeIndexingBuffer();
 
@@ -2884,7 +2890,7 @@ public class IndexShardTests extends IndexShardTestCase {
         final long postRefreshBytes = ss.getMemoryInBytes();
 
         // Deleting a doc causes its memory to be freed from the breaker
-        deleteDoc(primary, "test", "0");
+        deleteDoc(primary, "_doc", "0");
         primary.refresh("force refresh");
 
         ss = primary.segmentStats(randomBoolean());
@@ -2903,7 +2909,7 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
@@ -2923,13 +2929,13 @@ public class IndexShardTests extends IndexShardTestCase {
                         if (randomBoolean()) {
                             String id = "id-" + threadName + "-" + i;
                             logger.debug("--> {} indexing {}", threadName, id);
-                            indexDoc(primary, "test", id, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
+                            indexDoc(primary, "_doc", id, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
                         }
 
                         if (randomBoolean() && i > 10) {
                             String id = "id-" + threadName + "-" + randomIntBetween(0, i - 1);
                             logger.debug("--> {}, deleting {}", threadName, id);
-                            deleteDoc(primary, "test", id);
+                            deleteDoc(primary, "_doc", id);
                         }
 
                         if (randomBoolean()) {
@@ -2998,7 +3004,7 @@ public class IndexShardTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
         IndexMetaData metaData = IndexMetaData.builder("test")
-            .putMapping("test", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
+            .putMapping("_doc", "{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}")
             .settings(settings)
             .primaryTerm(0, 1).build();
         ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState
@@ -3019,7 +3025,7 @@ public class IndexShardTests extends IndexShardTestCase {
         primaryRef.set(primary);
         recoverShardFromStore(primary);
         for (int i = 0; i < 3; i++) {
-            indexDoc(primary, "test", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
+            indexDoc(primary, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
             primary.refresh("test"); // produce segments
         }
         List<Segment> segments = primary.segments(false);
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
index 91e439dcda9..037b5d68fb8 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
@@ -57,10 +57,15 @@ public class IndexingOperationListenerTests extends ESTestCase{
             @Override
             public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
                 assertThat(shardId, is(randomShardId));
-                if (result.hasFailure() == false) {
-                    postIndex.incrementAndGet();
-                } else {
-                    postIndex(shardId, index, result.getFailure());
+                switch (result.getResultType()) {
+                    case SUCCESS:
+                        postIndex.incrementAndGet();
+                        break;
+                    case FAILURE:
+                        postIndex(shardId, index, result.getFailure());
+                        break;
+                    default:
+                        throw new IllegalArgumentException("unknown result type: " + result.getResultType());
                 }
             }
 
@@ -80,10 +85,15 @@ public class IndexingOperationListenerTests extends ESTestCase{
             @Override
             public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
                 assertThat(shardId, is(randomShardId));
-                if (result.hasFailure() == false) {
-                    postDelete.incrementAndGet();
-                } else {
-                    postDelete(shardId, delete, result.getFailure());
+                switch (result.getResultType()) {
+                    case SUCCESS:
+                        postDelete.incrementAndGet();
+                        break;
+                    case FAILURE:
+                        postDelete(shardId, delete, result.getFailure());
+                        break;
+                    default:
+                        throw new IllegalArgumentException("unknown result type: " + result.getResultType());
                 }
             }
 
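Both listener hunks above replace the boolean `result.hasFailure()` check with a switch over `result.getResultType()`. The same dispatch can be written as a small standalone helper; this is only an illustrative sketch (the `handleResult` name is hypothetical, not part of the patch):

```java
// Branch on the explicit result type; anything other than SUCCESS or
// FAILURE is treated as a programming error, mirroring the tests above.
static void handleResult(Engine.Result result) {
    switch (result.getResultType()) {
        case SUCCESS:
            break; // the operation was applied
        case FAILURE:
            // the result carries the exception that caused the failure
            throw new AssertionError("operation failed", result.getFailure());
        default:
            throw new IllegalArgumentException("unknown result type: " + result.getResultType());
    }
}
```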
diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java
index 12c3804a1a7..1257aea3d14 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java
@@ -67,8 +67,8 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
         for (int i = 0; i < numDocs; i++) {
             // Index doc but not advance local checkpoint.
             shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
-                SourceToParse.source(shard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"), XContentType.JSON),
-                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, "test"));
+                SourceToParse.source(shard.shardId().getIndexName(), "_doc", Integer.toString(i), new BytesArray("{}"), XContentType.JSON),
+                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
         }
 
         long globalCheckPoint = numDocs > 0 ? randomIntBetween(0, numDocs - 1) : 0;
@@ -121,8 +121,8 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
         for (int i = 0; i < numDocs; i++) {
             // Index doc but not advance local checkpoint.
             shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
-                SourceToParse.source(shard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"), XContentType.JSON),
-                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, "test"));
+                SourceToParse.source(shard.shardId().getIndexName(), "_doc", Integer.toString(i), new BytesArray("{}"), XContentType.JSON),
+                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
         }
 
         String allocationId = shard.routingEntry().allocationId().getId();
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
index 91b35594772..3b50fa64915 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java
@@ -52,7 +52,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
             final long initDocs = scaledRandomIntBetween(1, 10);
             {
                 for (int i = 0; i < initDocs; i++) {
-                    indexDoc(replica, "doc", Integer.toString(i));
+                    indexDoc(replica, "_doc", Integer.toString(i));
                     if (randomBoolean()) {
                         flushShard(replica);
                     }
@@ -68,7 +68,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
             final int moreDocs = randomIntBetween(1, 10);
             {
                 for (int i = 0; i < moreDocs; i++) {
-                    indexDoc(replica, "doc", Long.toString(i));
+                    indexDoc(replica, "_doc", Long.toString(i));
                     if (randomBoolean()) {
                         flushShard(replica);
                     }
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
index 4e9d0ccb22e..537409f35d1 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
@@ -122,23 +122,23 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
             final String indexName = orgReplica.shardId().getIndexName();
 
             // delete #1
-            orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL, u -> {});
+            orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL);
             getTranslog(orgReplica).rollGeneration(); // isolate the delete in its own generation
             // index #0
             orgReplica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-                SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON), u -> {});
+                SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON));
             // index #3
             orgReplica.applyIndexOperationOnReplica(3, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-                SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON), u -> {});
+                SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON));
             // Flushing a new commit with local checkpoint=1 allows deleting the translog gen #1.
             orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true));
             // index #2
             orgReplica.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-                SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON), u -> {});
+                SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON));
             orgReplica.updateGlobalCheckpointOnReplica(3L, "test");
             // index #5 -> force NoOp #4.
             orgReplica.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
-                SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON), u -> {});
+                SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON));
 
             final int translogOps;
             if (randomBoolean()) {
@@ -247,9 +247,11 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
         int numDocs = between(1, 100);
         long globalCheckpoint = 0;
         for (int i = 0; i < numDocs; i++) {
-            primaryShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
-                SourceToParse.source(primaryShard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"),
-                    XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(primaryShard, "test"));
+            Engine.IndexResult result = primaryShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
+                SourceToParse.source(primaryShard.shardId().getIndexName(), "_doc", Integer.toString(i), new BytesArray("{}"),
+                    XContentType.JSON),
+                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
+            assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
             if (randomBoolean()) {
                 globalCheckpoint = randomLongBetween(globalCheckpoint, i);
                 primaryShard.updateLocalCheckpointForShard(primaryShard.routingEntry().allocationId().getId(), globalCheckpoint);
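The replica-side calls above follow the same pattern as on the primary: the trailing consumer is dropped and the `SourceToParse` becomes the last argument. A minimal sketch, assuming `replica` is a started replica shard and `indexName` is its index name:

```java
// Replay an index operation on a replica at seqNo 0, version 1, with
// external versioning; no mapping-update consumer is passed any more.
replica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL,
    IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
    SourceToParse.source(indexName, "_doc", "id-0", new BytesArray("{}"), XContentType.JSON));
```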
diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
index 63236b16554..69ea552835b 100644
--- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
+++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
@@ -20,13 +20,13 @@
 package org.elasticsearch.repositories.blobstore;
 
 import org.apache.lucene.store.Directory;
-import org.elasticsearch.core.internal.io.IOUtils;
 import org.apache.lucene.util.TestUtil;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingHelper;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.index.shard.IndexShard;
@@ -72,7 +72,7 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {
             final int numDocs = scaledRandomIntBetween(1, 500);
             recoverShardFromStore(shard);
             for (int i = 0; i < numDocs; i++) {
-                indexDoc(shard, "doc", Integer.toString(i));
+                indexDoc(shard, "_doc", Integer.toString(i));
                 if (rarely()) {
                     flushShard(shard, false);
                 }
@@ -138,7 +138,7 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {
             final int numDocs = scaledRandomIntBetween(1, 500);
             recoverShardFromStore(shard);
             for (int i = 0; i < numDocs; i++) {
-                indexDoc(shard, "doc", Integer.toString(i));
+                indexDoc(shard, "_doc", Integer.toString(i));
                 if (rarely()) {
                     flushShard(shard, false);
                 }
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
index a0e1cfc3341..74cc8cb9141 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
@@ -25,10 +25,10 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
@@ -58,7 +58,6 @@ import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.seqno.ReplicationTracker;
@@ -94,7 +93,6 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.BiFunction;
-import java.util.function.Consumer;
 
 import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting;
 import static org.hamcrest.Matchers.contains;
@@ -184,7 +182,8 @@ public abstract class IndexShardTestCase extends ESTestCase {
             .build();
         IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName())
             .settings(settings)
-            .primaryTerm(0, primaryTerm);
+            .primaryTerm(0, primaryTerm)
+            .putMapping("_doc", "{ \"properties\": {} }");
         return newShard(shardRouting, metaData.build(), listeners);
     }
 
@@ -559,27 +558,27 @@ public abstract class IndexShardTestCase extends ESTestCase {
         throws IOException {
         SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType);
         sourceToParse.routing(routing);
+        Engine.IndexResult result;
         if (shard.routingEntry().primary()) {
-            final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse,
-                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type));
+            result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse,
+                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
+            if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+                updateMappings(shard, IndexMetaData.builder(shard.indexSettings().getIndexMetaData())
+                    .putMapping(type, result.getRequiredMappingUpdate().toString()).build());
+                result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse,
+                    IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
+            }
             shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(),
                 shard.getEngine().getLocalCheckpointTracker().getCheckpoint());
-            return result;
         } else {
-            return shard.applyIndexOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0,
-                VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, getMappingUpdater(shard, type));
-        }
-    }
-
-    protected Consumer<Mapping> getMappingUpdater(IndexShard shard, String type) {
-        return update -> {
-            try {
-                updateMappings(shard, IndexMetaData.builder(shard.indexSettings().getIndexMetaData())
-                    .putMapping(type, update.toString()).build());
-            } catch (IOException e) {
-                ExceptionsHelper.reThrowIfNotNull(e);
+            result = shard.applyIndexOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0,
+                VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
+            if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+                throw new TransportReplicationAction.RetryOnReplicaException(shard.shardId,
+                    "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate());
             }
-        };
+        }
+        return result;
     }
 
     protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) {
@@ -589,10 +588,9 @@ public abstract class IndexShardTestCase extends ESTestCase {
 
     protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException {
         if (shard.routingEntry().primary()) {
-            return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL, update -> {});
+            return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL);
         } else {
-            return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1,
-                0L, type, id, VersionType.EXTERNAL, update -> {});
+            return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id, VersionType.EXTERNAL);
         }
     }
 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index d127f1a6b36..cd1aa6b020d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.test;
 
 import com.carrotsearch.randomizedtesting.RandomizedContext;
-import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
@@ -38,6 +37,7 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
@@ -252,7 +252,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
      */
     protected IndexService createIndex(String index, Settings settings, String type, Object... mappings) {
         CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
-        if (type != null && mappings != null) {
+        if (type != null) {
             createIndexRequestBuilder.addMapping(type, mappings);
         }
         return createIndex(index, createIndexRequestBuilder);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java
new file mode 100644
index 00000000000..0547ce70f2f
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.apache.logging.log4j.core.util.Throwables;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class BlockMasterServiceOnMaster extends SingleNodeDisruption {
+
+    AtomicReference<CountDownLatch> disruptionLatch = new AtomicReference<>();
+
+
+    public BlockMasterServiceOnMaster(Random random) {
+        super(random);
+    }
+
+
+    @Override
+    public void startDisrupting() {
+        disruptedNode = cluster.getMasterName();
+        final String disruptionNodeCopy = disruptedNode;
+        if (disruptionNodeCopy == null) {
+            return;
+        }
+        ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
+        if (clusterService == null) {
+            return;
+        }
+        logger.info("blocking master service on node [{}]", disruptionNodeCopy);
+        boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
+        assert success : "startDisrupting called without waiting on stopDisrupting to complete";
+        final CountDownLatch started = new CountDownLatch(1);
+        clusterService.getMasterService().submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask() {
+            @Override
+            public Priority priority() {
+                return Priority.IMMEDIATE;
+            }
+
+            @Override
+            public ClusterState execute(ClusterState currentState) throws Exception {
+                started.countDown();
+                CountDownLatch latch = disruptionLatch.get();
+                if (latch != null) {
+                    try {
+                        latch.await();
+                    } catch (InterruptedException e) {
+                        Throwables.rethrow(e);
+                    }
+                }
+                return currentState;
+            }
+
+            @Override
+            public void onFailure(String source, Exception e) {
+                logger.error("unexpected error during disruption", e);
+            }
+        });
+        try {
+            started.await();
+        } catch (InterruptedException e) { // best effort: continue even if interrupted
+        }
+    }
+
+    @Override
+    public void stopDisrupting() {
+        CountDownLatch latch = disruptionLatch.get();
+        if (latch != null) {
+            latch.countDown();
+        }
+
+    }
+
+    @Override
+    public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+        removeFromCluster(cluster);
+    }
+
+    @Override
+    public TimeValue expectedTimeToHeal() {
+        return TimeValue.timeValueMinutes(0);
+    }
+}
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java
index 582ef6abe1e..752f97b6b1a 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java
@@ -31,7 +31,6 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.xpack.core.watcher.watch.ClockMock;
 import org.elasticsearch.xpack.core.watcher.watch.Watch;
 import org.elasticsearch.xpack.core.watcher.watch.WatchStatus;
@@ -230,7 +229,7 @@ public class WatcherIndexingListenerTests extends ESTestCase {
         when(operation.id()).thenReturn("_id");
         when(operation.type()).thenReturn(Watch.DOC_TYPE);
         when(shardId.getIndexName()).thenReturn("anything");
-        when(result.hasFailure()).thenReturn(false);
+        when(result.getResultType()).thenReturn(Engine.Result.Type.SUCCESS);
 
         listener.postIndex(shardId, operation, new ElasticsearchParseException("whatever"));
         verifyZeroInteractions(triggerService);
@@ -730,4 +729,4 @@ public class WatcherIndexingListenerTests extends ESTestCase {
         return new DiscoveryNode(nodeId, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(),
                 new HashSet<>(asList(DiscoveryNode.Role.values())), Version.CURRENT);
     }
-}
\ No newline at end of file
+}

From 80e48bbcdeb4ce46d5d1b522a5b771ea51f5dffa Mon Sep 17 00:00:00 2001
From: Luca Cavanna 
Date: Tue, 1 May 2018 09:33:17 +0200
Subject: [PATCH 35/68] Remove animal sniffer from REST client sniffer (#30260)

Animal sniffer is no longer needed; we can remove it from the sniffer
just as we did for the low-level REST client in #29646.
---
 client/sniffer/build.gradle                                    | 3 ---
 .../client/sniff/ElasticsearchHostsSnifferTests.java           | 2 --
 2 files changed, 5 deletions(-)

diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle
index 03e4a082d27..e226656dbd2 100644
--- a/client/sniffer/build.gradle
+++ b/client/sniffer/build.gradle
@@ -20,7 +20,6 @@
 import org.elasticsearch.gradle.precommit.PrecommitTasks
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'ru.vyarus.animalsniffer'
 apply plugin: 'nebula.maven-base-publish'
 apply plugin: 'nebula.maven-scm'
 
@@ -52,8 +51,6 @@ dependencies {
   testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
   testCompile "org.elasticsearch:securemock:${versions.securemock}"
   testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
-  testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
-  signature "org.codehaus.mojo.signature:java17:1.0@signature"
 }
 
 forbiddenApisMain {
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java
index 483b7df62f9..f13d1751104 100644
--- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java
@@ -60,8 +60,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;
 
-//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
-@IgnoreJRERequirement
 public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
 
     private int sniffRequestTimeout;

From 74504acb0dbab7560039598edfe99ac4bdff8d4b Mon Sep 17 00:00:00 2001
From: Luca Cavanna 
Date: Tue, 1 May 2018 09:33:31 +0200
Subject: [PATCH 36/68] Remove `Request.Params#flatSettings` leftover (#29676)

Relates to #29560
---
 .../src/main/java/org/elasticsearch/client/Request.java    | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
index d68d3b309af..192da83f8b4 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
@@ -841,13 +841,6 @@ public final class Request {
             return this;
         }
 
-        Params withFlatSettings(boolean flatSettings) {
-            if (flatSettings) {
-                return putParam("flat_settings", Boolean.TRUE.toString());
-            }
-            return this;
-        }
-
         Params withIncludeDefaults(boolean includeDefaults) {
             if (includeDefaults) {
                 return putParam("include_defaults", Boolean.TRUE.toString());

From acdf330a0e863ef85829f3fb6e6b732c796fe3b6 Mon Sep 17 00:00:00 2001
From: Luca Cavanna 
Date: Tue, 1 May 2018 09:35:09 +0200
Subject: [PATCH 37/68] Minor DocWriteResponse changes (#29675)

Remove the double conditional that depended on the `Result` value. It
makes little sense to pass in a boolean flag derived from a `Result`
value we already have, only for it to be represented internally as a
`Result` value again.

Also changed the `Result` `lowercase` instance member to be computed
from `name()` instead of `toString()`, which is safer, and to use
`Locale.ROOT` instead of `Locale.ENGLISH`.
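
For illustration, a toy example (not the actual enum) of why deriving
the lowercase form from `name()` with `Locale.ROOT` is the safer
combination:

```
import java.util.Locale;

public class ResultLowercaseDemo {
    enum Result {
        CREATED;

        @Override
        public String toString() {
            // toString() can be overridden, so a lowercase form derived
            // from it is fragile; name() always returns the constant name.
            return "Created!";
        }
    }

    public static void main(String[] args) {
        // Locale.ROOT gives a locale-independent case mapping, avoiding
        // surprises such as the Turkish dotless i.
        System.out.println(Result.CREATED.name().toLowerCase(Locale.ROOT));     // created
        System.out.println(Result.CREATED.toString().toLowerCase(Locale.ROOT)); // created!
    }
}
```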
---
 .../elasticsearch/action/DocWriteResponse.java    |  2 +-
 .../action/delete/DeleteResponse.java             | 14 +++++++++++---
 .../elasticsearch/action/index/IndexResponse.java | 15 +++++++++++----
 3 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java
index 8fa183a8434..7273bf29462 100644
--- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java
@@ -75,7 +75,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
 
         Result(int op) {
             this.op = (byte) op;
-            this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);
+            this.lowercase = this.name().toLowerCase(Locale.ROOT);
         }
 
         public byte getOp() {
diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java
index a7b9b064732..14b7f65239b 100644
--- a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java
@@ -40,7 +40,16 @@ public class DeleteResponse extends DocWriteResponse {
     }
 
     public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean found) {
-        super(shardId, type, id, seqNo, primaryTerm, version, found ? Result.DELETED : Result.NOT_FOUND);
+        this(shardId, type, id, seqNo, primaryTerm, version, found ? Result.DELETED : Result.NOT_FOUND);
+    }
+
+    private DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) {
+        super(shardId, type, id, seqNo, primaryTerm, version, assertDeletedOrNotFound(result));
+    }
+
+    private static Result assertDeletedOrNotFound(Result result) {
+        assert result == Result.DELETED || result == Result.NOT_FOUND;
+        return result;
     }
 
     @Override
@@ -87,8 +96,7 @@ public class DeleteResponse extends DocWriteResponse {
 
         @Override
         public DeleteResponse build() {
-            DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version,
-                    result == Result.DELETED ? true : false);
+            DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version, result);
             deleteResponse.setForcedRefresh(forcedRefresh);
             if (shardInfo != null) {
                 deleteResponse.setShardInfo(shardInfo);
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java
index 743086165f6..3174e4d8ab1 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java
@@ -41,7 +41,16 @@ public class IndexResponse extends DocWriteResponse {
     }
 
     public IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean created) {
-        super(shardId, type, id, seqNo, primaryTerm, version, created ? Result.CREATED : Result.UPDATED);
+        this(shardId, type, id, seqNo, primaryTerm, version, created ? Result.CREATED : Result.UPDATED);
+    }
+
+    private IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) {
+        super(shardId, type, id, seqNo, primaryTerm, version, assertCreatedOrUpdated(result));
+    }
+
+    private static Result assertCreatedOrUpdated(Result result) {
+        assert result == Result.CREATED || result == Result.UPDATED;
+        return result;
     }
 
     @Override
@@ -87,11 +96,9 @@ public class IndexResponse extends DocWriteResponse {
      * instantiate the {@link IndexResponse}.
      */
     public static class Builder extends DocWriteResponse.Builder {
-
         @Override
         public IndexResponse build() {
-            IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version,
-                    result == Result.CREATED ? true : false);
+            IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version, result);
             indexResponse.setForcedRefresh(forcedRefresh);
             if (shardInfo != null) {
                 indexResponse.setShardInfo(shardInfo);

From 057cdffed54745022509322e65600f10c415421e Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou 
Date: Tue, 1 May 2018 09:50:32 +0100
Subject: [PATCH 38/68] [ML] Refactor DataStreamDiagnostics to use array
 (#30129)

This commit refactors the DataStreamDiagnostics class,
achieving the following advantages:

- simpler code, by encapsulating the moving bucket histogram
in its own class
- better performance, by using an array instead of a map to store
the buckets (see the sketch below)
- explicit handling of gap buckets, in preparation for fixing #30080
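
A minimal sketch of the circular-buffer idea, using illustrative names
rather than the actual `BucketDiagnostics` members:

```
// Hypothetical, simplified sketch of an array used as a circular buffer
// of per-bucket counts; the real class also tracks earliest/latest
// bucket start times and notifies a flush listener.
public class CircularBucketCounts {
    private final long[] counts;
    private long latestBucket = -1; // highest bucket number seen so far

    public CircularBucketCounts(int windowSize) {
        counts = new long[windowSize];
    }

    // Records one event in the given (non-negative) bucket number.
    public void add(long bucket) {
        if (latestBucket < 0) {
            latestBucket = bucket;
        }
        // Advance the window; slots falling out of it are reset, which is
        // where empty or sparse buckets would be reported.
        while (bucket > latestBucket) {
            latestBucket++;
            counts[(int) (latestBucket % counts.length)] = 0;
        }
        counts[(int) (bucket % counts.length)]++;
    }

    // Count for a bucket that is still inside the window.
    public long countAt(long bucket) {
        return counts[(int) (bucket % counts.length)];
    }
}
```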
---
 .../ml/job/process/DataCountsReporter.java    |   3 +-
 .../ml/job/process/DataStreamDiagnostics.java | 223 ------------------
 .../diagnostics/BucketDiagnostics.java        | 132 +++++++++++
 .../diagnostics/DataStreamDiagnostics.java    | 113 +++++++++
 .../DataStreamDiagnosticsTests.java           |  78 +++++-
 5 files changed, 319 insertions(+), 230 deletions(-)
 delete mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnostics.java
 create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/BucketDiagnostics.java
 create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java
 rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/{ => diagnostics}/DataStreamDiagnosticsTests.java (81%)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java
index debe1c36bba..80223027e8e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java
@@ -12,8 +12,9 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
-import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister;
 import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts;
+import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister;
+import org.elasticsearch.xpack.ml.job.process.diagnostics.DataStreamDiagnostics;
 
 import java.util.Date;
 import java.util.Locale;
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnostics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnostics.java
deleted file mode 100644
index b7332042586..00000000000
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnostics.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License;
- * you may not use this file except in compliance with the Elastic License.
- */
-package org.elasticsearch.xpack.ml.job.process;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.lucene.util.Counter;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.xpack.core.ml.job.config.Job;
-
-import java.util.Date;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-public class DataStreamDiagnostics {
-
-    /**
-     * Minimum window to take into consideration for bucket count histogram.
-     */
-    private static final int MIN_BUCKET_WINDOW = 10;
-
-    /**
-     * Threshold to report potential sparsity problems.
-     * 
-     * Sparsity score is calculated: log(average) - log(current)
-     * 
-     * If score is above the threshold, bucket is reported as sparse bucket.
-     */
-    private static final int DATA_SPARSITY_THRESHOLD = 2;
-    private static final long MS_IN_SECOND = 1000;
-
-    private static final Logger LOGGER = Loggers.getLogger(DataStreamDiagnostics.class);
-    /**
-     * Container for the histogram
-     *
-     * Note: Using a sorted map in order to iterate in order when consuming the
-     * data. The counter is lazily initialized and potentially missing in case
-     * of empty buckets.
-     *
-     * The container gets pruned along the data streaming based on the bucket
-     * window, so it should not contain more than max(MIN_BUCKET_WINDOW,
-     * 'buckets_required_by_latency') + 1 items at any time.
-     * 
-     * Sparsity can only be calculated after the window has been filled. Currently
-     * this window is lost if a job gets closed and re-opened. We might fix this 
-     * in future.
-     */
-    private final SortedMap<Long, Counter> movingBucketHistogram = new TreeMap<>();
-
-    private final long bucketSpan;
-    private final long latency;
-    private long movingBucketCount = 0;
-    private long latestReportedBucket = -1;
-
-    private long bucketCount = 0;
-    private long emptyBucketCount = 0;
-    private long latestEmptyBucketTime = -1;
-    private long sparseBucketCount = 0;
-    private long latestSparseBucketTime = -1;
-    
-    public DataStreamDiagnostics(Job job) {
-        bucketSpan = job.getAnalysisConfig().getBucketSpan().seconds();
-        latency = job.getAnalysisConfig().getLatency() == null ? 0 : job.getAnalysisConfig().getLatency().seconds();
-    }
-
-    /**
-     * Check record
-     * 
-     * @param recordTimestampInMs
-     *            The record timestamp in milliseconds since epoch
-     */
-
-    public void checkRecord(long recordTimestampInMs) {
-        checkBucketing(recordTimestampInMs);
-    }
-
-    /**
-     * Flush all counters, should be called at the end of the data stream
-     */
-    public void flush() {
-        // flush all we know
-        if (movingBucketHistogram.isEmpty() == false) {
-            flush(movingBucketHistogram.lastKey());
-        }
-    }
-
-    /**
-     * Check bucketing of record. Report empty and sparse buckets.
-     * 
-     * @param recordTimestampInMs
-     *            The record timestamp in milliseconds since epoch
-     */
-    private void checkBucketing(long recordTimestampInMs) {
-        long bucket = (recordTimestampInMs / MS_IN_SECOND) / bucketSpan;
-        long bucketHistogramStartBucket = ((recordTimestampInMs / MS_IN_SECOND) - latency) / bucketSpan;
-
-        bucketHistogramStartBucket = Math.min(bucket - MIN_BUCKET_WINDOW, bucketHistogramStartBucket);
-
-        movingBucketHistogram.computeIfAbsent(bucket, l -> Counter.newCounter()).addAndGet(1);
-        ++movingBucketCount;
-
-        // find the very first bucket
-        if (latestReportedBucket == -1) {
-            latestReportedBucket = bucket - 1;
-        }
-
-        // flush all bucket out of the window
-        flush(bucketHistogramStartBucket);
-    }
-
-    /**
-     * Flush Bucket reporting till the given bucket.
-     * 
-     * @param bucketNumber
-     *            The number of the last bucket that can be flushed.
-     */
-    private void flush(long bucketNumber) {
-
-        // check for a longer period of empty buckets
-        long emptyBuckets = movingBucketHistogram.firstKey() - latestReportedBucket - 1;
-        if (emptyBuckets > 0) {
-            bucketCount += emptyBuckets;
-            emptyBucketCount += emptyBuckets;
-            latestEmptyBucketTime = (movingBucketHistogram.firstKey() - 1) * bucketSpan * MS_IN_SECOND;
-            latestReportedBucket = movingBucketHistogram.firstKey() - 1;
-        }
-
-        // calculate the average number of data points in a bucket based on the
-        // current history
-        double averageBucketSize = (float) movingBucketCount / movingBucketHistogram.size();
-
-        // prune all buckets that can be flushed
-        long lastBucketSparsityCheck = Math.min(bucketNumber, movingBucketHistogram.lastKey());
-
-        for (long pruneBucket = movingBucketHistogram.firstKey(); pruneBucket < lastBucketSparsityCheck; ++pruneBucket) {
-
-            Counter bucketSizeHolder = movingBucketHistogram.remove(pruneBucket);
-            long bucketSize = bucketSizeHolder != null ? bucketSizeHolder.get() : 0L;
-
-            LOGGER.debug("Checking bucket {} compare sizes, this bucket: {} average: {}", pruneBucket, bucketSize, averageBucketSize);
-            ++bucketCount;
-            latestReportedBucket = pruneBucket;
-
-            // substract bucketSize from the counter
-            movingBucketCount -= bucketSize;
-
-            // check if bucket is empty
-            if (bucketSize == 0L) {
-                latestEmptyBucketTime = pruneBucket * bucketSpan * MS_IN_SECOND;
-                ++emptyBucketCount;
-
-                // do not do sparse analysis on an empty bucket
-                continue;
-            }
-
-            // simplistic way to calculate data sparsity, just take the log and
-            // check the difference
-            double logAverageBucketSize = Math.log(averageBucketSize);
-            double logBucketSize = Math.log(bucketSize);
-            double sparsityScore = logAverageBucketSize - logBucketSize;
-
-            if (sparsityScore > DATA_SPARSITY_THRESHOLD) {
-                LOGGER.debug("Sparse bucket {}, this bucket: {} average: {}, sparsity score: {}", pruneBucket, bucketSize,
-                        averageBucketSize, sparsityScore);
-                ++sparseBucketCount;
-                latestSparseBucketTime = pruneBucket * bucketSpan * MS_IN_SECOND;
-            }
-        }
-
-        // prune the rest if necessary
-        for (long pruneBucket = lastBucketSparsityCheck; pruneBucket < bucketNumber; ++pruneBucket) {
-            Counter bucketSizeHolder = movingBucketHistogram.remove(pruneBucket);
-            long bucketSize = bucketSizeHolder != null ? bucketSizeHolder.get() : 0L;
-
-            bucketCount++;
-            latestReportedBucket = pruneBucket;
-
-            // substract bucketSize from the counter
-            movingBucketCount -= bucketSize;
-
-            // check if bucket is empty
-            if (bucketSize == 0L) {
-                latestEmptyBucketTime = pruneBucket * bucketSpan * MS_IN_SECOND;
-                ++emptyBucketCount;
-            }
-        }
-    }
-
-    public long getBucketCount() {
-        return bucketCount;
-    }
-
-    public long getEmptyBucketCount() {
-        return emptyBucketCount;
-    }
-
-    public Date getLatestEmptyBucketTime() {
-        return latestEmptyBucketTime > 0 ? new Date(latestEmptyBucketTime) : null;
-    }
-
-    public long getSparseBucketCount() {
-        return sparseBucketCount;
-    }
-
-    public Date getLatestSparseBucketTime() {
-        return latestSparseBucketTime > 0 ? new Date(latestSparseBucketTime) : null;
-    }
-    
-    /**
-     * Resets counts,
-     * 
-     * Note: This does not reset the inner state for e.g. sparse bucket
-     * detection.
-     *
-     */
-    public void resetCounts() {
-        bucketCount = 0;
-        emptyBucketCount = 0;
-        sparseBucketCount = 0;
-    }
-}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/BucketDiagnostics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/BucketDiagnostics.java
new file mode 100644
index 00000000000..c61926dfb04
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/BucketDiagnostics.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.job.process.diagnostics;
+
+import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.core.ml.utils.Intervals;
+
+/**
+ * A moving window of buckets that allows keeping
+ * track of some statistics like the bucket count,
+ * empty or sparse buckets, etc.
+ *
+ * The counts are stored in an array that functions as a
+ * circular buffer. When time is advanced, all buckets
+ * out of the window are flushed.
+ */
+class BucketDiagnostics {
+
+    private static final int MIN_BUCKETS = 10;
+
+    private final long bucketSpanMs;
+    private final long latencyMs;
+    private final int maxSize;
+    private final long[] buckets;
+    private long movingBucketCount = 0;
+    private long latestBucketStartMs = -1;
+    private int latestBucketIndex;
+    private long earliestBucketStartMs = -1;
+    private int earliestBucketIndex;
+    private long latestFlushedBucketStartMs = -1;
+    private final BucketFlushListener bucketFlushListener;
+
+    BucketDiagnostics(Job job, BucketFlushListener bucketFlushListener) {
+        bucketSpanMs = job.getAnalysisConfig().getBucketSpan().millis();
+        latencyMs = job.getAnalysisConfig().getLatency() == null ? 0 : job.getAnalysisConfig().getLatency().millis();
+        maxSize = Math.max((int) (Intervals.alignToCeil(latencyMs, bucketSpanMs) / bucketSpanMs), MIN_BUCKETS);
+        buckets = new long[maxSize];
+        this.bucketFlushListener = bucketFlushListener;
+    }
+
+    void addRecord(long recordTimestampMs) {
+        long bucketStartMs = Intervals.alignToFloor(recordTimestampMs, bucketSpanMs);
+
+        // Initialize earliest/latest times
+        if (latestBucketStartMs < 0) {
+            latestBucketStartMs = bucketStartMs;
+            earliestBucketStartMs = bucketStartMs;
+        }
+
+        advanceTime(bucketStartMs);
+        addToBucket(bucketStartMs);
+    }
+
+    private void advanceTime(long bucketStartMs) {
+        while (bucketStartMs > latestBucketStartMs) {
+            int flushBucketIndex = (latestBucketIndex + 1) % maxSize;
+
+            if (flushBucketIndex == earliestBucketIndex) {
+                flush(flushBucketIndex);
+                movingBucketCount -= buckets[flushBucketIndex];
+                earliestBucketStartMs += bucketSpanMs;
+                earliestBucketIndex = (earliestBucketIndex + 1) % maxSize;
+            }
+            buckets[flushBucketIndex] = 0L;
+
+            latestBucketStartMs += bucketSpanMs;
+            latestBucketIndex = flushBucketIndex;
+        }
+    }
+
+    private void addToBucket(long bucketStartMs) {
+        int offsetToLatest = (int) ((bucketStartMs - latestBucketStartMs) / bucketSpanMs);
+        int bucketIndex = (latestBucketIndex + offsetToLatest) % maxSize;
+        if (bucketIndex < 0) {
+            bucketIndex = maxSize + bucketIndex;
+        }
+
+        ++buckets[bucketIndex];
+        ++movingBucketCount;
+
+        if (bucketStartMs < earliestBucketStartMs) {
+            earliestBucketStartMs = bucketStartMs;
+            earliestBucketIndex = bucketIndex;
+        }
+    }
+
+    private void flush(int bucketIndex) {
+        long bucketStartMs = getTimestampMs(bucketIndex);
+        if (bucketStartMs > latestFlushedBucketStartMs) {
+            bucketFlushListener.onBucketFlush(bucketStartMs, buckets[bucketIndex]);
+            latestFlushedBucketStartMs = bucketStartMs;
+        }
+    }
+
+    private long getTimestampMs(int bucketIndex) {
+        int offsetToLatest = latestBucketIndex - bucketIndex;
+        if (offsetToLatest < 0) {
+            offsetToLatest = maxSize + offsetToLatest;
+        }
+        return latestBucketStartMs - offsetToLatest * bucketSpanMs;
+    }
+
+    void flush() {
+        if (latestBucketStartMs < 0) {
+            return;
+        }
+
+        int bucketIndex = earliestBucketIndex;
+        while (bucketIndex != latestBucketIndex) {
+            flush(bucketIndex);
+            bucketIndex = (bucketIndex + 1) % maxSize;
+        }
+    }
+
+    double averageBucketCount() {
+        return (double) movingBucketCount / size();
+    }
+
+    private int size() {
+        if (latestBucketStartMs < 0) {
+            return 0;
+        }
+        return (int) ((latestBucketStartMs - earliestBucketStartMs) / bucketSpanMs) + 1;
+    }
+
+    interface BucketFlushListener {
+        void onBucketFlush(long bucketStartMs, long bucketCounts);
+    }
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java
new file mode 100644
index 00000000000..a19f6eba023
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.job.process.diagnostics;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.xpack.core.ml.job.config.Job;
+
+import java.util.Date;
+
+public class DataStreamDiagnostics {
+
+    /**
+     * Threshold to report potential sparsity problems.
+     *
+     * Sparsity score is calculated: log(average) - log(current)
+     *
+     * If score is above the threshold, bucket is reported as sparse bucket.
+     */
+    private static final int DATA_SPARSITY_THRESHOLD = 2;
+
+    private static final Logger LOGGER = Loggers.getLogger(DataStreamDiagnostics.class);
+
+    private final BucketDiagnostics bucketDiagnostics;
+
+    private long bucketCount = 0;
+    private long emptyBucketCount = 0;
+    private long latestEmptyBucketTime = -1;
+    private long sparseBucketCount = 0;
+    private long latestSparseBucketTime = -1;
+
+    public DataStreamDiagnostics(Job job) {
+        bucketDiagnostics = new BucketDiagnostics(job, createBucketFlushListener());
+    }
+
+    private BucketDiagnostics.BucketFlushListener createBucketFlushListener() {
+        return (flushedBucketStartMs, flushedBucketCount) -> {
+            ++bucketCount;
+            if (flushedBucketCount == 0) {
+                ++emptyBucketCount;
+                latestEmptyBucketTime = flushedBucketStartMs;
+            } else {
+                // simplistic way to calculate data sparsity, just take the log and
+                // check the difference
+                double averageBucketSize = bucketDiagnostics.averageBucketCount();
+                double logAverageBucketSize = Math.log(averageBucketSize);
+                double logBucketSize = Math.log(flushedBucketCount);
+                double sparsityScore = logAverageBucketSize - logBucketSize;
+
+                if (sparsityScore > DATA_SPARSITY_THRESHOLD) {
+                    LOGGER.debug("Sparse bucket {}, this bucket: {} average: {}, sparsity score: {}", flushedBucketStartMs,
+                            flushedBucketCount, averageBucketSize, sparsityScore);
+                    ++sparseBucketCount;
+                    latestSparseBucketTime = flushedBucketStartMs;
+                }
+            }
+        };
+    }
+
+    /**
+     * Check record
+     *
+     * @param recordTimestampInMs
+     *            The record timestamp in milliseconds since epoch
+     */
+    public void checkRecord(long recordTimestampInMs) {
+        bucketDiagnostics.addRecord(recordTimestampInMs);
+    }
+
+    /**
+     * Flush all counters, should be called at the end of the data stream
+     */
+    public void flush() {
+        // flush all we know
+        bucketDiagnostics.flush();
+    }
+
+    public long getBucketCount() {
+        return bucketCount;
+    }
+
+    public long getEmptyBucketCount() {
+        return emptyBucketCount;
+    }
+
+    public Date getLatestEmptyBucketTime() {
+        return latestEmptyBucketTime > 0 ? new Date(latestEmptyBucketTime) : null;
+    }
+
+    public long getSparseBucketCount() {
+        return sparseBucketCount;
+    }
+
+    public Date getLatestSparseBucketTime() {
+        return latestSparseBucketTime > 0 ? new Date(latestSparseBucketTime) : null;
+    }
+
+    /**
+     * Resets counts.
+     *
+     * Note: this does not reset the inner state used for e.g. sparse bucket
+     * detection.
+     *
+     */
+    public void resetCounts() {
+        bucketCount = 0;
+        emptyBucketCount = 0;
+        sparseBucketCount = 0;
+    }
+}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnosticsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnosticsTests.java
similarity index 81%
rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnosticsTests.java
rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnosticsTests.java
index 2c167f0df82..19f7f88c38f 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataStreamDiagnosticsTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnosticsTests.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.ml.job.process;
+package org.elasticsearch.xpack.ml.job.process.diagnostics;
 
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.test.ESTestCase;
@@ -13,7 +13,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
 import org.junit.Before;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.Date;
 
@@ -21,9 +20,9 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
 
     private static final long BUCKET_SPAN = 60000;
     private Job job;
-    
+
     @Before
-    public void setUpMocks() throws IOException {
+    public void setUpMocks() {
         AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(Arrays.asList(new Detector.Builder("metric", "field").build()));
         acBuilder.setBucketSpan(TimeValue.timeValueMillis(BUCKET_SPAN));
         acBuilder.setLatency(TimeValue.ZERO);
@@ -32,7 +31,7 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
         Job.Builder builder = new Job.Builder("job_id");
         builder.setAnalysisConfig(acBuilder);
         builder.setDataDescription(new DataDescription.Builder());
-        job = builder.build(new Date());
+        job = createJob(TimeValue.timeValueMillis(BUCKET_SPAN), null);
     }
 
     public void testIncompleteBuckets() {
@@ -80,6 +79,7 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
         assertEquals(null, d.getLatestSparseBucketTime());
         assertEquals(new Date(BUCKET_SPAN * 2), d.getLatestEmptyBucketTime());
     }
+
     public void testSimple() {
         DataStreamDiagnostics d = new DataStreamDiagnostics(job);
 
@@ -102,6 +102,58 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
         assertEquals(null, d.getLatestEmptyBucketTime());
     }
 
+    public void testSimpleReverse() {
+        DataStreamDiagnostics d = new DataStreamDiagnostics(job);
+
+        d.checkRecord(610000);
+        d.checkRecord(550000);
+        d.checkRecord(490000);
+        d.checkRecord(430000);
+        d.checkRecord(370000);
+        d.checkRecord(310000);
+        d.checkRecord(250000);
+        d.checkRecord(190000);
+        d.checkRecord(130000);
+        d.checkRecord(70000);
+
+        d.flush();
+        assertEquals(9, d.getBucketCount());
+        assertEquals(0, d.getEmptyBucketCount());
+        assertEquals(0, d.getSparseBucketCount());
+        assertEquals(null, d.getLatestSparseBucketTime());
+        assertEquals(null, d.getLatestEmptyBucketTime());
+    }
+
+    public void testWithLatencyLessThanTenBuckets() {
+        job = createJob(TimeValue.timeValueMillis(BUCKET_SPAN), TimeValue.timeValueMillis(3 * BUCKET_SPAN));
+        DataStreamDiagnostics d = new DataStreamDiagnostics(job);
+
+        long timestamp = 70000;
+        while (timestamp < 70000 + 20 * BUCKET_SPAN) {
+            sendManyDataPoints(d, timestamp - BUCKET_SPAN, timestamp + timestamp, 100);
+            timestamp += BUCKET_SPAN;
+        }
+
+        assertEquals(10, d.getBucketCount());
+        d.flush();
+        assertEquals(19, d.getBucketCount());
+    }
+
+    public void testWithLatencyGreaterThanTenBuckets() {
+        job = createJob(TimeValue.timeValueMillis(BUCKET_SPAN), TimeValue.timeValueMillis(13 * BUCKET_SPAN + 10000));
+        DataStreamDiagnostics d = new DataStreamDiagnostics(job);
+
+        long timestamp = 70000;
+        while (timestamp < 70000 + 20 * BUCKET_SPAN) {
+            sendManyDataPoints(d, timestamp - BUCKET_SPAN, timestamp + timestamp, 100);
+            timestamp += BUCKET_SPAN;
+        }
+
+        assertEquals(6, d.getBucketCount());
+        d.flush();
+        assertEquals(19, d.getBucketCount());
+    }
+
     public void testEmptyBuckets() {
         DataStreamDiagnostics d = new DataStreamDiagnostics(job);
 
@@ -280,7 +332,7 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
 
     /**
      * Send signals, make a longer period of sparse signals, then go up again
-     * 
+     *
     * The number of sparse buckets should not be too high; it could be normal.
      */
     public void testSparseBucketsLongerPeriod() {
@@ -307,6 +359,20 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
         assertEquals(null, d.getLatestEmptyBucketTime());
     }
 
+    private static Job createJob(TimeValue bucketSpan, TimeValue latency) {
+        AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(Arrays.asList(new Detector.Builder("metric", "field").build()));
+        acBuilder.setBucketSpan(bucketSpan);
+        if (latency != null) {
+            acBuilder.setLatency(latency);
+        }
+        acBuilder.setDetectors(Arrays.asList(new Detector.Builder("metric", "field").build()));
+
+        Job.Builder builder = new Job.Builder("job_id");
+        builder.setAnalysisConfig(acBuilder);
+        builder.setDataDescription(new DataDescription.Builder());
+        return builder.build(new Date());
+    }
+
     public void testFlushAfterZeroRecords() {
         DataStreamDiagnostics d = new DataStreamDiagnostics(job);
         d.flush();

From b9e1860f36b8127a254c8da848ed9913d2ce6838 Mon Sep 17 00:00:00 2001
From: David Turner 
Date: Tue, 1 May 2018 10:03:18 +0100
Subject: [PATCH 39/68] Mark JdbcSqlSpecIT as @AwaitsFix

Relates #30292
---
 .../org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java  | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java
index 609847f513e..caa06135959 100644
--- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java
+++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java
@@ -5,11 +5,13 @@
  */
 package org.elasticsearch.xpack.qa.sql.security;
 
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase;
 
 import java.util.Properties;
 
+@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30292")
 public class JdbcSqlSpecIT extends SqlSpecTestCase {
     public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
         super(fileName, groupName, testName, lineNumber, query);

From 99b98fab18e67dd58c607266e3374f3927c65f54 Mon Sep 17 00:00:00 2001
From: Nik Everett 
Date: Tue, 1 May 2018 07:44:58 -0400
Subject: [PATCH 40/68] Core: Pick inner most parse exception as root cause
 (#30270)

Just like `ElasticsearchException`, the inner-most
`XContentParseException` tends to contain the root cause of the
exception and should be shown to the user in the `root_cause` field.

This effectively undoes most of the changes that #29373 made to the
`root_cause` for parsing exceptions. The `type` field still changes from
`parse_exception` to `x_content_parse_exception`, but this seems like a
fairly safe change.

`ElasticsearchWrapperException` *looks* like a tempting way to implement
this, but the behavior isn't quite right: `ElasticsearchWrapperException`s
are unwrapped entirely, until the cause no longer
`implements ElasticsearchWrapperException`, whereas an
`XContentParseException` should be unwrapped only until its cause is no
longer an `XContentParseException`. In other words,
`ElasticsearchWrapperException`s are unwrapped one step too far.
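
The intended rule can be sketched roughly as follows (an illustrative
simplification, not the actual `guessRootCauses` implementation, which
also builds the wrapper exception for the generic case):

```
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.xcontent.XContentParseException;

class RootCauseSketch {
    // Rough sketch: descend to the inner-most XContentParseException,
    // continuing through causes that are themselves parse exceptions or
    // ElasticsearchExceptions (which can guess their own root causes).
    static Throwable innerMostParseCause(Throwable t) {
        while (t instanceof XContentParseException
                && (t.getCause() instanceof XContentParseException
                    || t.getCause() instanceof ElasticsearchException)) {
            t = t.getCause();
        }
        return t;
    }
}
```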

Closes #30261
---
 .../elasticsearch/ElasticsearchException.java | 18 +++++++
 .../ElasticsearchWrapperException.java        |  6 ++-
 .../ElasticsearchExceptionTests.java          | 48 ++++++++++++++-----
 3 files changed, 58 insertions(+), 14 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
index b1c02c4ac27..929c5f49e34 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParseException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
@@ -635,8 +636,25 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     public static ElasticsearchException[] guessRootCauses(Throwable t) {
         Throwable ex = ExceptionsHelper.unwrapCause(t);
         if (ex instanceof ElasticsearchException) {
+            // ElasticsearchException knows how to guess its own root cause
             return ((ElasticsearchException) ex).guessRootCauses();
         }
+        if (ex instanceof XContentParseException) {
+            /*
+             * We'd like to unwrap parsing exceptions to the inner-most
+             * parsing exception because that is generally the most interesting
+             * exception to return to the user. If that exception is caused by
+             * an ElasticsearchException we'd like to keep unwrapping because
+             * ElasticsearchExceptions tend to contain useful information for
+             * the user.
+             */
+            Throwable cause = ex.getCause();
+            if (cause != null) {
+                if (cause instanceof XContentParseException || cause instanceof ElasticsearchException) {
+                    return guessRootCauses(ex.getCause());
+                }
+            }
+        }
         return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) {
             @Override
             protected String getExceptionName() {
diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java b/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java
index 0b809e0923b..7e0fd3a24cb 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java
@@ -19,7 +19,11 @@
 
 package org.elasticsearch;
 
+/**
+ * An exception that is meant to be "unwrapped" when sent back to the user
+ * as an error because its {@link #getCause() cause}, if non-null, is
+ * always more useful to the user than the exception itself.
+ */
 public interface ElasticsearchWrapperException {
-
     Throwable getCause();
 }
diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
index d3560fc6db3..6e4c97fd3da 100644
--- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
+++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
@@ -41,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentLocation;
+import org.elasticsearch.common.xcontent.XContentParseException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.discovery.DiscoverySettings;
@@ -78,6 +79,7 @@ import static org.hamcrest.CoreMatchers.hasItem;
 import static org.hamcrest.CoreMatchers.hasItems;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.startsWith;
 
 public class ElasticsearchExceptionTests extends ESTestCase {
@@ -124,13 +126,13 @@ public class ElasticsearchExceptionTests extends ESTestCase {
             } else {
                 rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex);
             }
-            assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception");
-            assertEquals(rootCauses[0].getMessage(), "foobar");
+            assertEquals("parsing_exception", ElasticsearchException.getExceptionName(rootCauses[0]));
+            assertEquals("foobar", rootCauses[0].getMessage());
 
             ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar"));
             rootCauses = oneLevel.guessRootCauses();
-            assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "exception");
-            assertEquals(rootCauses[0].getMessage(), "foo");
+            assertEquals("exception", ElasticsearchException.getExceptionName(rootCauses[0]));
+            assertEquals("foo", rootCauses[0].getMessage());
         }
         {
             ShardSearchFailure failure = new ShardSearchFailure(
@@ -146,20 +148,40 @@ public class ElasticsearchExceptionTests extends ESTestCase {
             assertEquals(rootCauses.length, 2);
             assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception");
             assertEquals(rootCauses[0].getMessage(), "foobar");
-            assertEquals(((ParsingException) rootCauses[0]).getLineNumber(), 1);
-            assertEquals(((ParsingException) rootCauses[0]).getColumnNumber(), 2);
-            assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "query_shard_exception");
-            assertEquals((rootCauses[1]).getIndex().getName(), "foo1");
-            assertEquals(rootCauses[1].getMessage(), "foobar");
+            assertEquals(1, ((ParsingException) rootCauses[0]).getLineNumber());
+            assertEquals(2, ((ParsingException) rootCauses[0]).getColumnNumber());
+            assertEquals("query_shard_exception", ElasticsearchException.getExceptionName(rootCauses[1]));
+            assertEquals("foo1", rootCauses[1].getIndex().getName());
+            assertEquals("foobar", rootCauses[1].getMessage());
         }
 
         {
             final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar"));
             assertEquals(foobars.length, 1);
-            assertTrue(foobars[0] instanceof ElasticsearchException);
-            assertEquals(foobars[0].getMessage(), "foobar");
-            assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class);
-            assertEquals(foobars[0].getExceptionName(), "illegal_argument_exception");
+            assertThat(foobars[0], instanceOf(ElasticsearchException.class));
+            assertEquals("foobar", foobars[0].getMessage());
+            assertEquals(IllegalArgumentException.class, foobars[0].getCause().getClass());
+            assertEquals("illegal_argument_exception", foobars[0].getExceptionName());
+        }
+
+        {
+            XContentParseException inner = new XContentParseException(null, "inner");
+            XContentParseException outer = new XContentParseException(null, "outer", inner);
+            final ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(outer);
+            assertEquals(causes.length, 1);
+            assertThat(causes[0], instanceOf(ElasticsearchException.class));
+            assertEquals("inner", causes[0].getMessage());
+            assertEquals("x_content_parse_exception", causes[0].getExceptionName());
+        }
+
+        {
+            ElasticsearchException inner = new ElasticsearchException("inner");
+            XContentParseException outer = new XContentParseException(null, "outer", inner);
+            final ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(outer);
+            assertEquals(causes.length, 1);
+            assertThat(causes[0], instanceOf(ElasticsearchException.class));
+            assertEquals("inner", causes[0].getMessage());
+            assertEquals("exception", causes[0].getExceptionName());
         }
     }
 

From 5e9e6fed9009924686afa74f88421b8da84a55d2 Mon Sep 17 00:00:00 2001
From: Nik Everett 
Date: Tue, 1 May 2018 08:32:58 -0400
Subject: [PATCH 41/68] HTML5ify Javadoc for xpack core (#30277)

xpack core contains a fork of `Cron` from Quartz whose javadoc has a
`<table>` with non-HTML5-compatible markup. This html5ifies the table and
switches the `:x-pack:plugin:core` project to building javadoc with
HTML5.
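
The rewrite involved is mechanical. As a hedged sketch (the exact attributes in the Quartz markup may differ), the removed lines use the HTML 4 idiom and the added lines the HTML5 one, with presentational attributes dropped and the table summary expressed as a caption:

```java
/**
 * Before: HTML 4 table markup that the html5 doclint rejects, e.g.
 *   <table cellspacing="8" summary="Fields in cron expressions"> ... </table>
 *
 * After: HTML5 markup:
 *   <table>
 *     <caption>Fields in cron expressions</caption>
 *     <tr><th>Field Name</th><th>Allowed Values</th><th>Allowed Special Characters</th></tr>
 *   </table>
 */
```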
---
 .../elasticsearch/gradle/BuildPlugin.groovy   |  1 -
 .../xpack/core/scheduler/Cron.java            | 86 +++++++++----------
 2 files changed, 42 insertions(+), 45 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 4b709508662..4e512b3cdd4 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -561,7 +561,6 @@ class BuildPlugin implements Plugin {
              */
             List html4Projects = [
                 ':server',
-                ':x-pack:plugin:core',
             ]
             if (false == html4Projects.contains(project.path)) {
                 javadoc.options.addBooleanOption('html5', true)
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java
index f0bc4b98db1..7f4da3fbf1a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java
@@ -6,7 +6,6 @@
 package org.elasticsearch.xpack.core.scheduler;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.joda.time.DateTimeZone;
@@ -29,8 +28,7 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArg
 
 
 /**
- *
- * THIS CLASS IS A COPY OF
+ * THIS CLASS IS A FORK OF
  * 
  *     {@code CronExpression}
  * FROM THE QUARTZ PROJECT
@@ -44,63 +42,63 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArg
  * Cron expressions are comprised of 6 required fields and one optional field
  * separated by white space. The fields respectively are described as follows:
  *
- * 
+ *
* * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * * - * - * - * - * - * + * + * + * + * + * * *
Fields in cron expressions
Field Name Allowed Values Allowed Special CharactersField Name Allowed Values Allowed Special Characters
Seconds 0-59 , - * /Seconds 0-59 , - * /
Minutes 0-59 , - * /Minutes 0-59 , - * /
Hours 0-23 , - * /Hours 0-23 , - * /
Day-of-month 1-31 , - * ? / L WDay-of-month 1-31 , - * ? / L W
Month 0-11 or JAN-DEC , - * /Month 0-11 or JAN-DEC , - * /
Day-of-Week 1-7 or SUN-SAT , - * ? / L #Day-of-Week 1-7 or SUN-SAT , - * ? / L #
Year (Optional) empty, 1970-2199 , - * /Year (Optional) empty, 1970-2199 , - * /
*
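
As a quick illustration of the field layout in the table above, here are a few example expressions of my own, following the Quartz conventions this class forks (`?` means "no specific value" in the day-of-month or day-of-week field):

```java
// seconds minutes hours day-of-month month day-of-week (year optional)
String everyFiveMinutes = "0 0/5 * * * ?";      // at second 0 of every 5th minute
String noonOnWeekdays   = "0 0 12 ? * MON-FRI"; // 12:00:00 Monday through Friday
String lastDayOfMonth   = "0 0 0 L * ?";        // midnight on the last day of each month
```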

From 50535423ffac022052a626409aa74de1b90f2924 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 1 May 2018 08:48:19 -0400
Subject: [PATCH 42/68] Allow copying source settings on resize operation
 (#30255)

Today when an index is created from shrinking or splitting an existing
index, the target index inherits almost none of the source index
settings. This is surprising and a hassle for operators managing such
indices. Given this is the default behavior, we can not simply change
it. Instead, we start by introducing the ability to copy settings. This
flag can be set on the REST API or on the transport layer and it has
the behavior that it copies all settings from the source except
non-copyable settings (a property of a setting introduced in this
change). Additionally, settings on the request will always override.

This change is the first step in our adventure:
 - this flag is added here in 7.0.0 and immediately deprecated
 - this flag will be backported to 6.4.0 and remain deprecated
 - then, we will remove the ability to set this flag to false in 8.0.0
 - finally, in 9.0.0 we will remove this flag and the only behavior
   will be for settings to be copied
---
 docs/CHANGELOG.asciidoc                       |   2 +
 docs/reference/indices/shrink-index.asciidoc  |   9 +-
 docs/reference/indices/split-index.asciidoc   |   9 +-
 .../migration/migrate_7_0/api.asciidoc        |  18 +-
 .../rest-api-spec/api/indices.shrink.json     |   4 +
 .../rest-api-spec/api/indices.split.json      |   4 +
 .../test/indices.shrink/30_copy_settings.yml  |  94 ++++++++
 .../test/indices.split/30_copy_settings.yml   |  98 ++++++++
 .../CreateIndexClusterStateUpdateRequest.java |  11 +
 .../admin/indices/shrink/ResizeRequest.java   |  18 ++
 .../indices/shrink/TransportResizeAction.java |  26 +-
 .../metadata/MetaDataCreateIndexService.java  |  73 ++++--
 .../common/settings/IndexScopedSettings.java  |   1 +
 .../common/settings/Setting.java              |  17 +-
 .../admin/indices/RestResizeHandler.java      |  14 ++
 .../metadata/IndexCreationTaskTests.java      |   4 +-
 .../MetaDataCreateIndexServiceTests.java      | 222 +++++++++++-------
 .../common/settings/SettingTests.java         |   9 +-
 .../admin/indices/RestResizeHandlerTests.java |  62 +++++
 19 files changed, 577 insertions(+), 118 deletions(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml
 create mode 100644 server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java

diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc
index 1ef6534616f..9572bbb35c7 100644
--- a/docs/CHANGELOG.asciidoc
+++ b/docs/CHANGELOG.asciidoc
@@ -24,6 +24,8 @@
 
 === Enhancements
 
+<<copy-source-settings-on-resize>> ({pull}30255[#30255])
+
 === Bug Fixes
 
 Fail snapshot operations early when creating or deleting a snapshot on a repository that has been

diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc
index 2dfc2b4f617..ac1813a6fba 100644
--- a/docs/reference/indices/shrink-index.asciidoc
+++ b/docs/reference/indices/shrink-index.asciidoc
@@ -121,8 +121,13 @@ POST my_source_index/_shrink/my_target_index
 
 NOTE: Mappings may not be specified in the `_shrink` request.
 
-NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source
-index are not copied during a shrink operation.
+NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
+and `index.sort` settings, index settings on the source index are not copied
+during a shrink operation. With the exception of non-copyable settings, settings
+from the source index can be copied to the target index by adding the URL
+parameter `copy_settings=true` to the request.
+
+deprecated[7.0.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
 
 [float]
 === Monitoring the shrink process

diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc
index 8285fa4fa44..094e5e12718 100644
--- a/docs/reference/indices/split-index.asciidoc
+++ b/docs/reference/indices/split-index.asciidoc
@@ -177,8 +177,13 @@ POST my_source_index/_split/my_target_index
 
 NOTE: Mappings may not be specified in the `_split` request.
 
-NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source
-index are not copied during a shrink operation.
+NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
+and `index.sort` settings, index settings on the source index are not copied
+during a split operation. With the exception of non-copyable settings, settings
+from the source index can be copied to the target index by adding the URL
+parameter `copy_settings=true` to the request.
+
+deprecated[7.0.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
 
 [float]
 === Monitoring the split process

diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc
index 83124584761..91adf7160b5 100644
--- a/docs/reference/migration/migrate_7_0/api.asciidoc
+++ b/docs/reference/migration/migrate_7_0/api.asciidoc
@@ -61,8 +61,24 @@ backwards compatibility.
 
 Backwards support for the `suggest` metric was deprecated in 6.3.0 and now
 removed in 7.0.0.
 
 [[remove-field-caps-body]]
-==== In the fields capabilities API, `fields` can no longer be provided in the request body.
+==== In the fields capabilities API, `fields` can no longer be provided in the request body
+
 In the past, `fields` could be provided either as a parameter, or as part of
 the request body. Specifying `fields` in the request body as opposed to a
 parameter was deprecated in 6.4.0, and is now unsupported in 7.0.0.
+
+[[copy-source-settings-on-resize]]
+==== Copying source settings during shrink/split operations
+
+In prior versions of Elasticsearch, resize operations (shrink/split) would only
+copy `index.analysis`, `index.similarity`, and `index.sort` settings from the
+source index. Elasticsearch 7.0.0 introduces a request parameter `copy_settings`
+which will copy all index settings from the source except for non-copyable index
+settings. This parameter defaults to `false` in 7.x, is immediately deprecated
+in 7.0.0, will only be able to be set to `true` in 8.x, and will be removed in
+9.0.0. Note that when this parameter is used it means that all copyable settings
+will be copied; this includes the index blocks that must be put in place for a
+resize operation, and any allocation settings put in place in preparation for
+executing the resize operation. If you use this parameter, you will either have
+to follow up the operation with a request to adjust to the desired settings on
+the target index, or send the desired value of these settings with the resize
+operation.
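
A minimal sketch of driving the new flag through the transport-layer API added in this patch (index names are illustrative; the constructor and setters below are the ones exercised by `RestResizeHandler` and its tests later in the diff):

```java
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;

// Shrink "source" into "target", copying all copyable settings from the
// source index; settings set on the request itself still take precedence.
ResizeRequest request = new ResizeRequest("target", "source");
request.setResizeType(ResizeType.SHRINK);
request.setCopySettings(true); // deprecated from day one, see the commit message
```

On the REST layer the same thing is expressed as `POST /source/_shrink/target?copy_settings=true`, which is what the YAML tests below exercise.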
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json
index 5ef943eacba..f92421b79ae 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json
@@ -18,6 +18,10 @@
       }
     },
     "params": {
+      "copy_settings": {
+        "type" : "boolean",
+        "description" : "whether or not to copy settings from the source index (defaults to false)"
+      },
       "timeout": {
         "type" : "time",
         "description" : "Explicit operation timeout"

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json
index a79fa7b7082..2c14fced28c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json
@@ -18,6 +18,10 @@
       }
     },
     "params": {
+      "copy_settings": {
+        "type" : "boolean",
+        "description" : "whether or not to copy settings from the source index (defaults to false)"
+      },
       "timeout": {
         "type" : "time",
         "description" : "Explicit operation timeout"

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml
new file mode 100644
index 00000000000..d85a4cb1dd7
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml
@@ -0,0 +1,94 @@
+---
+"Copy settings during shrink index":
+  - skip:
+      version: " - 6.99.99"
+      reason: copy_settings did not exist prior to 7.0.0
+      features: "warnings"
+
+  - do:
+      cluster.state: {}
+
+  # get master node id
+  - set: { master_node: master }
+
+  - do:
+      indices.create:
+        index: source
+        wait_for_active_shards: 1
+        body:
+          settings:
+            # ensure everything is allocated on the master node
+            index.routing.allocation.include._id: $master
+            index.number_of_replicas: 0
+            index.merge.scheduler.max_merge_count: 4
+
+  # make it read-only
+  - do:
+      indices.put_settings:
+        index: source
+        body:
+          index.blocks.write: true
+          index.number_of_replicas: 0
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+        index: source
+
+  # now we do an actual shrink and copy settings
+  - do:
+      indices.shrink:
+        index: "source"
+        target: "copy-settings-target"
+        wait_for_active_shards: 1
+        master_timeout: 10s
+        copy_settings: true
+        body:
+          settings:
+            index.number_of_replicas: 0
+            index.merge.scheduler.max_thread_count: 2
+      warnings:
+        - "parameter [copy_settings] is deprecated but was [true]"
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+  - do:
+      indices.get_settings:
+        index: "copy-settings-target"
+
+  # settings should be copied
+  - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
+  - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
+  - match: { copy-settings-target.settings.index.blocks.write: "true" }
+  - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
+
+  # now we do an actual shrink and do not copy settings
+  - do:
+      indices.shrink:
+        index: "source"
+        target: "no-copy-settings-target"
+        wait_for_active_shards: 1
+        master_timeout: 10s
+        copy_settings: false
+        body:
+          settings:
+            index.number_of_replicas: 0
+            index.merge.scheduler.max_thread_count: 2
+      warnings:
+        - "parameter [copy_settings] is deprecated but was [false]"
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+  - do:
+      indices.get_settings:
+        index: "no-copy-settings-target"
+
+  # only the request setting should be copied
+  - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count
+  - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
+  - is_false: no-copy-settings-target.settings.index.blocks.write
+  - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml
new file mode 100644
index 00000000000..1bb0a52307d
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml
@@ -0,0 +1,98 @@
+---
+"Copy settings during split index":
+  - skip:
+      version: " - 6.99.99"
+      reason: copy_settings did not exist prior to 7.0.0
+      features: "warnings"
+
+  - do:
+      cluster.state: {}
+
+  # get master node id
+  - set: { master_node: master }
+
+  - do:
+      indices.create:
+        index: source
+        wait_for_active_shards: 1
+        body:
+          settings:
+            # ensure everything is allocated on the master node
+            index.routing.allocation.include._id: $master
+            index.number_of_replicas: 0
+            index.number_of_shards: 1
+            index.number_of_routing_shards: 4
+            index.merge.scheduler.max_merge_count: 4
+
+  # make it read-only
+  - do:
+      indices.put_settings:
+        index: source
+        body:
+          index.blocks.write: true
+          index.number_of_replicas: 0
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+        index: source
+
+  # now we do an actual split and copy settings
+  - do:
+      indices.split:
+        index: "source"
+        target: "copy-settings-target"
+        wait_for_active_shards: 1
+        master_timeout: 10s
+        copy_settings: true
+        body:
+          settings:
+            index.number_of_replicas: 0
+            index.number_of_shards: 2
+            index.merge.scheduler.max_thread_count: 2
+      warnings:
+        - "parameter [copy_settings] is deprecated but was [true]"
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+  - do:
+      indices.get_settings:
+        index: "copy-settings-target"
+
+  # settings should be copied
+  - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
+  - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
+  - match: { copy-settings-target.settings.index.blocks.write: "true" }
+  - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
+
+  # now we do an actual split and do not copy settings
+  - do:
+      indices.split:
+        index: "source"
+        target: "no-copy-settings-target"
+        wait_for_active_shards: 1
+        master_timeout: 10s
+        copy_settings: false
+        body:
+          settings:
+            index.number_of_replicas: 0
+            index.number_of_shards: 2
+            index.merge.scheduler.max_thread_count: 2
+      warnings:
+        - "parameter [copy_settings] is deprecated but was [false]"
+
+  - do:
+      cluster.health:
+        wait_for_status: green
+
+  - do:
+      indices.get_settings:
+        index: "no-copy-settings-target"
+
+  # only the request setting should be copied
+  - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count
+  - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
+  - is_false: no-copy-settings-target.settings.index.blocks.write
+  - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
index 4e2e2578875..f2e07e29bad 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
@@ -45,6 +45,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private final String providedName;
     private Index recoverFrom;
     private ResizeType resizeType;
+    private boolean copySettings;
 
     private IndexMetaData.State state = IndexMetaData.State.OPEN;
 
@@ -112,6 +113,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
         return this;
     }
 
+    public CreateIndexClusterStateUpdateRequest copySettings(final boolean copySettings) {
+        this.copySettings = copySettings;
+        return this;
+    }
+
     public TransportMessage originalMessage() {
         return originalMessage;
     }
@@ -170,4 +176,9 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     public ResizeType resizeType() {
         return resizeType;
     }
+
+    public boolean copySettings() {
+        return copySettings;
+    }
+
 }

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
index 79600674f4a..77b3945db09 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.action.admin.indices.shrink;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.admin.indices.alias.Alias;
@@ -55,6 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest implements
     private CreateIndexRequest targetIndexRequest;
     private String sourceIndex;
     private ResizeType type = ResizeType.SHRINK;
+    private boolean copySettings = false;
 
     ResizeRequest() {}
 
@@ -96,6 +98,11 @@ public class ResizeRequest extends AcknowledgedRequest implements
         } else {
             type = ResizeType.SHRINK; // BWC this used to be shrink only
         }
+        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+            copySettings = in.readBoolean();
+        } else {
+            copySettings = false;
+        }
     }
 
     @Override
@@ -106,6 +113,9 @@ public class ResizeRequest extends AcknowledgedRequest implements
         if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
             out.writeEnum(type);
         }
+        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+            out.writeBoolean(copySettings);
+        }
     }
 
@@ -177,6 +187,14 @@ public class ResizeRequest extends AcknowledgedRequest implements
         return type;
     }
 
+    public void setCopySettings(final boolean copySettings) {
+        this.copySettings = copySettings;
+    }
+
+    public boolean getCopySettings() {
+        return copySettings;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
index 28fc994a467..834ef15ce26 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
@@ -178,19 +178,19 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
 [hunk body lost in extraction]

diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
 [diff header and start of the IndexCreationTask constructor hunk lost in extraction]
                        listener, IndicesService indicesService, AliasValidator aliasValidator, NamedXContentRegistry xContentRegistry,
-                       Settings settings, IndexValidator validator) {
+                       Settings settings, IndexValidator validator, IndexScopedSettings indexScopedSettings) {
             super(Priority.URGENT, request, listener);
             this.request = request;
             this.logger = logger;
@@ -252,6 +263,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             this.xContentRegistry = xContentRegistry;
             this.settings = settings;
             this.validator = validator;
+            this.indexScopedSettings = indexScopedSettings;
         }
 
         @Override
@@ -273,7 +285,8 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 
                 // we only find a template when its an API call (a new index)
                 // find templates, highest order are better matching
-                List<IndexTemplateMetaData> templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index());
+                List<IndexTemplateMetaData> templates =
+                        MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index());
 
                 Map<String, Custom> customs = new HashMap<>();
@@ -402,7 +415,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 if (recoverFromIndex != null) {
                     assert request.resizeType() != null;
                     prepareResizeIndexSettings(
-                        currentState, mappings.keySet(), indexSettingsBuilder, recoverFromIndex, request.index(), request.resizeType());
+                        currentState,
+                        mappings.keySet(),
+                        indexSettingsBuilder,
+                        recoverFromIndex,
+                        request.index(),
+                        request.resizeType(),
+                        request.copySettings(),
+                        indexScopedSettings);
                 }
                 final Settings actualIndexSettings = indexSettingsBuilder.build();
                 tmpImdBuilder.settings(actualIndexSettings);
@@ -673,8 +693,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         return sourceMetaData;
     }
 
-    static void prepareResizeIndexSettings(ClusterState currentState, Set<String> mappingKeys, Settings.Builder indexSettingsBuilder,
-                                           Index resizeSourceIndex, String resizeIntoName, ResizeType type) {
+    static void prepareResizeIndexSettings(
+            final ClusterState currentState,
+            final Set<String> mappingKeys,
+            final Settings.Builder indexSettingsBuilder,
+            final Index resizeSourceIndex,
+            final String resizeIntoName,
+            final ResizeType type,
+            final boolean copySettings,
+            final IndexScopedSettings indexScopedSettings) {
         final IndexMetaData sourceMetaData = currentState.metaData().index(resizeSourceIndex.getName());
         if (type == ResizeType.SHRINK) {
             final List<String> nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(),
@@ -695,15 +722,33 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             throw new IllegalStateException("unknown resize type is " + type);
         }
 
-        final Predicate<String> sourceSettingsPredicate =
-            (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort."))
-                && indexSettingsBuilder.keys().contains(s) == false;
+        final Settings.Builder builder = Settings.builder();
+        if (copySettings) {
+            // copy all settings, except non-copyable settings and settings that have already been set (e.g., from the request)
+            for (final String key : sourceMetaData.getSettings().keySet()) {
+                final Setting<?> setting = indexScopedSettings.get(key);
+                if (setting == null) {
+                    assert indexScopedSettings.isPrivateSetting(key) : key;
+                } else if (setting.getProperties().contains(Setting.Property.NotCopyableOnResize)) {
+                    continue;
+                }
+                // do not override settings that have already been set (for example, from the request)
+                if (indexSettingsBuilder.keys().contains(key)) {
+                    continue;
+                }
+                builder.copy(key, sourceMetaData.getSettings());
+            }
+        } else {
+            final Predicate<String> sourceSettingsPredicate =
+                (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort."))
+                    && indexSettingsBuilder.keys().contains(s) == false;
+            builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate));
+        }
+
         indexSettingsBuilder
-            // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they
-            // wanna add extra settings
             .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
             .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())
-            .put(sourceMetaData.getSettings().filter(sourceSettingsPredicate))
+            .put(builder.build())
             .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize())
             .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName())
             .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID());

diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index bd6bba7b784..debd0f59a2e 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -43,6 +43,7 @@ import org.elasticsearch.indices.IndicesRequestCache;
 
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;

diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
index 9d4ee53aa1a..d9e42a67671 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -114,7 +114,13 @@ public class Setting implements ToXContentObject {
         /**
          * Index scope
          */
-        IndexScope
+        IndexScope,
+
+        /**
+         * Mark this setting as not copyable during an index resize (shrink or split). This property can only be applied to settings that
+         * also have {@link Property#IndexScope}.
+         */
+        NotCopyableOnResize
     }
 
     private final Key key;
@@ -142,10 +148,15 @@ public class Setting implements ToXContentObject {
         if (properties.length == 0) {
             this.properties = EMPTY_PROPERTIES;
         } else {
-            this.properties = EnumSet.copyOf(Arrays.asList(properties));
-            if (isDynamic() && isFinal()) {
+            final EnumSet<Property> propertiesAsSet = EnumSet.copyOf(Arrays.asList(properties));
+            if (propertiesAsSet.contains(Property.Dynamic) && propertiesAsSet.contains(Property.Final)) {
                 throw new IllegalArgumentException("final setting [" + key + "] cannot be dynamic");
             }
+            if (propertiesAsSet.contains(Property.NotCopyableOnResize) && propertiesAsSet.contains(Property.IndexScope) == false) {
+                throw new IllegalArgumentException(
+                        "non-index-scoped setting [" + key + "] can not have property [" + Property.NotCopyableOnResize + "]");
+            }
+            this.properties = propertiesAsSet;
         }
     }

diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java
index 3d0158cf95f..e6c994a85c3 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java
@@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestController;
@@ -46,6 +47,19 @@ public abstract class RestResizeHandler extends BaseRestHandler {
     public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
         resizeRequest.setResizeType(getResizeType());
+        final String rawCopySettings = request.param("copy_settings");
+        final boolean copySettings;
+        if (rawCopySettings == null) {
+            copySettings = resizeRequest.getCopySettings();
+        } else {
+            deprecationLogger.deprecated("parameter [copy_settings] is deprecated but was [" + rawCopySettings + "]");
+            if (rawCopySettings.length() == 0) {
+                copySettings = true;
+            } else {
+                copySettings = Booleans.parseBoolean(rawCopySettings);
+            }
+        }
+        resizeRequest.setCopySettings(copySettings);
         request.applyContentParser(resizeRequest::fromXContent);
         resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout()));
         resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout()));

diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
index 49df78565d3..ad36457bde5 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
@@ -39,6 +39,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -388,8 +389,7 @@ public class IndexCreationTaskTests extends ESTestCase {
         setupRequest();
         final MetaDataCreateIndexService.IndexCreationTask task = new MetaDataCreateIndexService.IndexCreationTask(
             logger, allocationService, request, listener, indicesService, aliasValidator, xContentRegistry, clusterStateSettings.build(),
-            validator
-        );
+            validator, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
         return task.execute(state);
     }

diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
index 28fbfaefe6d..d5f3d71d7ee 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.cluster.metadata;
 
+import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.cluster.ClusterName;
@@ -34,22 +35,27 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexNotFoundException;
-import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.indices.InvalidIndexNameException;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.gateway.TestGatewayAllocator;
 
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import static java.util.Collections.emptyMap;
-import static java.util.Collections.min;
 import static org.hamcrest.Matchers.endsWith;
 import static org.hamcrest.Matchers.equalTo;
@@ -228,90 +234,146 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
             Settings.builder().put("index.number_of_shards", targetShards).build());
     }
 
-    public void testResizeIndexSettings() {
-        String indexName = randomAlphaOfLength(10);
-        List versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()),
-            VersionUtils.randomVersion(random()));
+    public void testPrepareResizeIndexSettings() {
+        final List versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()));
         versions.sort(Comparator.comparingLong(l -> l.id));
-        Version version = versions.get(0);
-        Version minCompat = versions.get(1);
-        Version upgraded = versions.get(2);
-        // create one that won't fail
-        ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0,
-            Settings.builder()
-                .put("index.blocks.write", true)
-                .put("index.similarity.default.type", "BM25")
-                .put("index.version.created", version)
-                .put("index.version.upgraded", upgraded)
-                .put("index.version.minimum_compatible", minCompat.luceneVersion.toString())
-                .put("index.analysis.analyzer.default.tokenizer", "keyword")
-                .build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
-            .build();
-        AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
-            Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
-            new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
+        final Version version = versions.get(0);
+        final Version upgraded = versions.get(1);
+        final Settings indexSettings =
+                Settings.builder()
+                        .put("index.version.created", version)
+                        .put("index.version.upgraded", upgraded)
+                        .put("index.similarity.default.type", "BM25")
+                        .put("index.analysis.analyzer.default.tokenizer", "keyword")
+                        .build();
+        runPrepareResizeIndexSettingsTest(
+                indexSettings,
+                Settings.EMPTY,
+                Collections.emptyList(),
+                randomBoolean(),
+                settings -> {
+                    assertThat("similarity settings must be copied", settings.get("index.similarity.default.type"), equalTo("BM25"));
+                    assertThat(
+                            "analysis settings must be copied",
+                            settings.get("index.analysis.analyzer.default.tokenizer"),
+                            equalTo("keyword"));
+                    assertThat(settings.get("index.routing.allocation.initial_recovery._id"), equalTo("node1"));
+                    assertThat(settings.get("index.allocation.max_retries"), equalTo("1"));
+                    assertThat(settings.getAsVersion("index.version.created", null), equalTo(version));
+                    assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded));
+                });
+    }
 
-        RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
-        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-        // now we start the shard
-        routingTable = service.applyStartedShards(clusterState,
-            routingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
-        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
-
-        {
-            final Settings.Builder builder = Settings.builder();
-            builder.put("index.number_of_shards", 1);
-            MetaDataCreateIndexService.prepareResizeIndexSettings(
-                clusterState,
-                Collections.emptySet(),
-                builder,
-                clusterState.metaData().index(indexName).getIndex(),
-                "target",
-                ResizeType.SHRINK);
-            final Settings settings = builder.build();
-            assertThat("similarity settings must be copied", settings.get("index.similarity.default.type"), equalTo("BM25"));
-            assertThat(
-                "analysis settings must be copied", settings.get("index.analysis.analyzer.default.tokenizer"), equalTo("keyword"));
-            assertThat(settings.get("index.routing.allocation.initial_recovery._id"), equalTo("node1"));
-            assertThat(settings.get("index.allocation.max_retries"), equalTo("1"));
-            assertThat(settings.getAsVersion("index.version.created", null), equalTo(version));
-            assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded));
-        }
+    public void testPrepareResizeIndexSettingsCopySettings() {
+        final int maxMergeCount = randomIntBetween(1, 16);
+        final int maxThreadCount = randomIntBetween(1, 16);
+        final Setting<String> nonCopyableExistingIndexSetting =
+                Setting.simpleString("index.non_copyable.existing", Setting.Property.IndexScope, Setting.Property.NotCopyableOnResize);
+        final Setting<String> nonCopyableRequestIndexSetting =
+                Setting.simpleString("index.non_copyable.request", Setting.Property.IndexScope, Setting.Property.NotCopyableOnResize);
+        runPrepareResizeIndexSettingsTest(
+                Settings.builder()
+                        .put("index.merge.scheduler.max_merge_count", maxMergeCount)
+                        .put("index.non_copyable.existing", "existing")
+                        .build(),
+                Settings.builder()
+                        .put("index.blocks.write", (String) null)
+                        .put("index.merge.scheduler.max_thread_count", maxThreadCount)
+                        .put("index.non_copyable.request", "request")
+                        .build(),
+                Arrays.asList(nonCopyableExistingIndexSetting, nonCopyableRequestIndexSetting),
+                true,
+                settings -> {
+                    assertNull(settings.getAsBoolean("index.blocks.write", null));
+                    assertThat(settings.get("index.routing.allocation.require._name"), equalTo("node1"));
+                    assertThat(settings.getAsInt("index.merge.scheduler.max_merge_count", null), equalTo(maxMergeCount));
+                    assertThat(settings.getAsInt("index.merge.scheduler.max_thread_count", null), equalTo(maxThreadCount));
+                    assertNull(settings.get("index.non_copyable.existing"));
+                    assertThat(settings.get("index.non_copyable.request"), equalTo("request"));
+                });
+    }
 
+    public void testPrepareResizeIndexSettingsAnalysisSettings() {
         // analysis settings from the request are not overwritten
-        {
-            final Settings.Builder builder = Settings.builder();
-            builder.put("index.number_of_shards", 1);
-            builder.put("index.analysis.analyzer.default.tokenizer", "whitespace");
-            MetaDataCreateIndexService.prepareResizeIndexSettings(
-                clusterState,
-                Collections.emptySet(),
-                builder,
-                clusterState.metaData().index(indexName).getIndex(),
-                "target",
-                ResizeType.SHRINK);
-            final Settings settings = builder.build();
-            assertThat(
-                "analysis settings are not overwritten",
-                settings.get("index.analysis.analyzer.default.tokenizer"),
-                equalTo("whitespace"));
-        }
+        runPrepareResizeIndexSettingsTest(
+                Settings.EMPTY,
+                Settings.builder().put("index.analysis.analyzer.default.tokenizer", "whitespace").build(),
+                Collections.emptyList(),
+                randomBoolean(),
+                settings ->
+                        assertThat(
+                                "analysis settings are not overwritten",
+                                settings.get("index.analysis.analyzer.default.tokenizer"),
+                                equalTo("whitespace"))
+        );
+    }
 
+    public void testPrepareResizeIndexSettingsSimilaritySettings() {
         // similarity settings from the request are not overwritten
-        {
-            final Settings.Builder builder = Settings.builder();
-            builder.put("index.number_of_shards", 1);
-            builder.put("index.similarity.default.type", "DFR");
-            MetaDataCreateIndexService.prepareResizeIndexSettings(
-                clusterState,
-                Collections.emptySet(),
-                builder,
-                clusterState.metaData().index(indexName).getIndex(),
-                "target",
-                ResizeType.SHRINK);
-            final Settings settings = builder.build();
-            assertThat("similarity settings are not overwritten", settings.get("index.similarity.default.type"), equalTo("DFR"));
-        }
+        runPrepareResizeIndexSettingsTest(
+                Settings.EMPTY,
+                Settings.builder().put("index.similarity.sim.type", "DFR").build(),
+                Collections.emptyList(),
+                randomBoolean(),
+                settings ->
+                        assertThat("similarity settings are not overwritten", settings.get("index.similarity.sim.type"), equalTo("DFR")));
+
+    }
+
+    private void runPrepareResizeIndexSettingsTest(
+            final Settings sourceSettings,
+            final Settings requestSettings,
+            final Collection<Setting<?>> additionalIndexScopedSettings,
+            final boolean copySettings,
+            final Consumer<Settings> consumer) {
+        final String indexName = randomAlphaOfLength(10);
+
+        final Settings indexSettings = Settings.builder()
+                .put("index.blocks.write", true)
+                .put("index.routing.allocation.require._name", "node1")
+                .put(sourceSettings)
+                .build();
+
+        final ClusterState initialClusterState =
+                ClusterState
+                        .builder(createClusterState(indexName, randomIntBetween(2, 10), 0, indexSettings))
+                        .nodes(DiscoveryNodes.builder().add(newNode("node1")))
+                        .build();
+
+        final AllocationService service = new AllocationService(
+                Settings.builder().build(),
+                new AllocationDeciders(Settings.EMPTY,
+                        Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
+                new TestGatewayAllocator(),
+                new BalancedShardsAllocator(Settings.EMPTY),
+                EmptyClusterInfoService.INSTANCE);
+
+        final RoutingTable initialRoutingTable = service.reroute(initialClusterState, "reroute").routingTable();
+        final ClusterState routingTableClusterState = ClusterState.builder(initialClusterState).routingTable(initialRoutingTable).build();
+
+        // now we start the shard
+        final RoutingTable routingTable = service.applyStartedShards(
+                routingTableClusterState,
+                initialRoutingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
+        final ClusterState clusterState = ClusterState.builder(routingTableClusterState).routingTable(routingTable).build();
+
+        final Settings.Builder indexSettingsBuilder = Settings.builder().put("index.number_of_shards", 1).put(requestSettings);
+        final Set<Setting<?>> settingsSet =
+                Stream.concat(
+                        IndexScopedSettings.BUILT_IN_INDEX_SETTINGS.stream(),
+                        additionalIndexScopedSettings.stream())
+                        .collect(Collectors.toSet());
+        MetaDataCreateIndexService.prepareResizeIndexSettings(
+                clusterState,
+                Collections.emptySet(),
+                indexSettingsBuilder,
+                clusterState.metaData().index(indexName).getIndex(),
+                "target",
+                ResizeType.SHRINK,
+                copySettings,
+                new IndexScopedSettings(Settings.EMPTY, settingsSet));
+        consumer.accept(indexSettingsBuilder.build());
     }
 
     private DiscoveryNode newNode(String nodeId) {

diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
index 187c0e21b4d..1ab92526e31 100644
--- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
+++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
@@ -722,12 +722,19 @@ public class SettingTests extends ESTestCase {
         assertThat(ex.getMessage(), containsString("properties cannot be null for setting"));
     }
 
-    public void testRejectConflictProperties() {
+    public void testRejectConflictingDynamicAndFinalProperties() {
         IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
             () -> Setting.simpleString("foo.bar", Property.Final, Property.Dynamic));
         assertThat(ex.getMessage(), containsString("final setting [foo.bar] cannot be dynamic"));
     }
 
+    public void testRejectNonIndexScopedNotCopyableOnResizeSetting() {
+        final IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> Setting.simpleString("foo.bar", Property.NotCopyableOnResize));
+        assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [NotCopyableOnResize]")));
+    }
+
     public void testTimeValue() {
         final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test");

diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java
new file mode 100644
index 00000000000..75071309458
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
+
+import java.io.IOException;
+import java.util.Collections;
+
+import static org.mockito.Mockito.mock;
+
+public class RestResizeHandlerTests extends ESTestCase {
+
+    public void testShrinkCopySettingsDeprecated() throws IOException {
+        final RestResizeHandler.RestShrinkIndexAction handler =
+                new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class));
+        final String copySettings = randomFrom("true", "false");
+        final FakeRestRequest request =
+                new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
+                        .withParams(Collections.singletonMap("copy_settings", copySettings))
+                        .withPath("source/_shrink/target")
+                        .build();
+        handler.prepareRequest(request, mock(NodeClient.class));
+        assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
+    }
+
+    public void testSplitCopySettingsDeprecated() throws IOException {
+        final RestResizeHandler.RestSplitIndexAction handler =
+                new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class));
+        final String copySettings = randomFrom("true", "false");
+        final FakeRestRequest request =
+                new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
+                        .withParams(Collections.singletonMap("copy_settings", copySettings))
+                        .withPath("source/_split/target")
+                        .build();
+        handler.prepareRequest(request, mock(NodeClient.class));
+        assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
+    }
+
+}

From 5de6f4ff7b637cd03a632ba01c16a4f8ef40747c Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 1 May 2018 08:46:12 -0400
Subject: [PATCH 43/68] Adjust copy settings on resize BWC version

This commit adjusts the BWC version for copy settings on resize
operations after the behavior was backported to 6.x.
--- docs/CHANGELOG.asciidoc | 4 ++-- docs/reference/indices/shrink-index.asciidoc | 2 +- docs/reference/indices/split-index.asciidoc | 2 +- .../migration/migrate_7_0/api.asciidoc | 17 ----------------- .../test/indices.shrink/30_copy_settings.yml | 4 ++-- .../test/indices.split/30_copy_settings.yml | 4 ++-- .../admin/indices/shrink/ResizeRequest.java | 4 ++-- 7 files changed, 10 insertions(+), 27 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 9572bbb35c7..2bad49c4807 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -24,8 +24,6 @@ === Enhancements -<> ({pull}30255[#30255]) - === Bug Fixes Fail snapshot operations early when creating or deleting a snapshot on a repository that has been @@ -41,6 +39,8 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea === Enhancements +<> ({pull}30255[#30255]) + === Bug Fixes Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index ac1813a6fba..81d79c47472 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -127,7 +127,7 @@ during a shrink operation. With the exception of non-copyable settings, settings from the source index can be copied to the target index by adding the URL parameter `copy_settings=true` to the request. -deprecated[7.0.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] +deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] [float] === Monitoring the shrink process diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 094e5e12718..58d34cfd9a7 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -183,7 +183,7 @@ during a split operation. With the exception of non-copyable settings, settings from the source index can be copied to the target index by adding the URL parameter `copy_settings=true` to the request. -deprecated[7.0.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] +deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] [float] === Monitoring the split process diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 91adf7160b5..e140fd577bd 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -65,20 +65,3 @@ deprecated in 6.3.0 and now removed in 7.0.0. In the past, `fields` could be provided either as a parameter, or as part of the request body. Specifying `fields` in the request body as opposed to a parameter was deprecated in 6.4.0, and is now unsupported in 7.0.0. - -[[copy-source-settings-on-resize]] -==== Copying source settings during shrink/split operations - -In prior versions of Elasticsearch, resize operations (shrink/split) would only -copy `index.analysis`, `index.similarity`, and `index.sort` settings from the -source index. Elasticsearch 7.0.0 introduces a request parameter `copy_settings` -which will copy all index settings from the source except for non-copyable index -settings. 
This parameter defaults to `false` in 7.x, is immediately deprecated -in 7.0.0, will only be able to be set to `true` in 8.x, and will be removed in -9.0.0. Note than when this parameter is used it means that all copyable settings -will be copied; this includes the index blocks that must be put in place for a -resize operation, and any allocation settings put in place in preparation for -executing the resize operation. If you use this parameter, you will either have -to follow up the operation with a request to adjust to the desired settings on -the target index, or send the desired value of these settings with the resize -operation. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index d85a4cb1dd7..34757427e69 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during shrink index": - skip: - version: " - 6.99.99" - reason: copy_settings did not exist prior to 7.0.0 + version: " - 6.3.99" + reason: copy_settings did not exist prior to 6.4.0 features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 1bb0a52307d..1d3e37aa7b0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during split index": - skip: - version: " - 6.99.99" - reason: copy_settings did not exist prior to 7.0.0 + version: " - 6.3.99" + reason: copy_settings did not exist prior to 6.4.0 features: "warnings" - do: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index 77b3945db09..f53b5437f03 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -98,7 +98,7 @@ public class ResizeRequest extends AcknowledgedRequest implements } else { type = ResizeType.SHRINK; // BWC this used to be shrink only } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { copySettings = in.readBoolean(); } else { copySettings = false; @@ -113,7 +113,7 @@ public class ResizeRequest extends AcknowledgedRequest implements if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { out.writeEnum(type); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeBoolean(copySettings); } } From dd5fcb211d7c416693c55b9cdf3400111bbb5c9d Mon Sep 17 00:00:00 2001 From: Diwas Joshi Date: Tue, 1 May 2018 18:46:38 +0530 Subject: [PATCH 44/68] index name added to snapshot restore exception (#29604) This PR adds index name to snapshot restore exception if index is renamed during restoring. 
closes [#27601](https://github.com/elastic/elasticsearch/issues/27601) --- .../main/java/org/elasticsearch/snapshots/RestoreService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 63079fd63ce..a7df9bdfdfd 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -412,7 +412,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp // Make sure that the number of shards is the same. That's the only thing that we cannot change if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) { throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + - "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); + "] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); } } From 6229c0ce8820eaf45be2815df862140c725e6201 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 1 May 2018 15:20:12 +0200 Subject: [PATCH 45/68] Remove usage of transient settings to enable allocations in full restart upgrade docs (#29670) Since we disable allocation using persistent settings, we should be consistent and remove the setting from the persistent storage. Otherwise an accidental restart will lead to shards not being allocated. Relates to https://github.com/elastic/elasticsearch/pull/28757 --- docs/reference/upgrade/cluster_restart.asciidoc | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index bdd8a8207ff..b092e45ae29 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -91,25 +91,20 @@ already have local shard copies. + -- When all nodes have joined the cluster and recovered their primary shards, -reenable allocation. +reenable allocation by restoring `cluster.routing.allocation.enable` to its +default: [source,js] ------------------------------------------------------ PUT _cluster/settings { - "transient": { - "cluster.routing.allocation.enable": "all" + "persistent": { + "cluster.routing.allocation.enable": null } } ------------------------------------------------------ // CONSOLE -NOTE: Because <<_precedence_of_settings, transient -settings take precedence over persistent settings>>, this overrides the -persistent setting used to disable shard allocation in the first step. If you -don't explicitly reenable shard allocation after a full cluster restart, the -persistent setting is used and shard allocation remains disabled. - Once allocation is reenabled, the cluster starts allocating replica shards to the data nodes.
At this point it is safe to resume indexing and searching, but your cluster will recover more quickly if you can wait until all primary From d2ca16b4c7ea3b3b64b151c60dae10fea36a7a23 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 1 May 2018 14:26:19 +0100 Subject: [PATCH 46/68] Suppress reindex-from-old tests if there are spaces in the path --- modules/reindex/build.gradle | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index f34f4cf52e0..c0289f7df72 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -83,8 +83,9 @@ dependencies { es090 'org.elasticsearch:elasticsearch:0.90.13@zip' } -if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // we can't get the pid files in windows so we skip reindex-from-old +if (Os.isFamily(Os.FAMILY_WINDOWS) || rootProject.rootDir.toString().contains(" ")) { + // We can't get the pid files in windows and old versions of Elasticsearch doesn't set up the CLASSPATH correctly if there are spaces + // in the path. In either case, we skip reindex-from-old. integTestRunner.systemProperty "tests.fromOld", "false" } else { integTestRunner.systemProperty "tests.fromOld", "true" From f4901b8a192a7096af1e8c58bb6eb4ca58a2a56c Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 1 May 2018 15:51:21 +0200 Subject: [PATCH 47/68] Remove usage of transient settings to enable allocations in rolling upgrade docs (#29671) Since we disable allocation using persistent settings, we should be consistent and remove the setting from the persistent storage. Otherwise an accidental restart will lead to shards not being allocated. Relates to #28757 --- docs/reference/upgrade/rolling_upgrade.asciidoc | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index 5af52130317..76a10f752be 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -72,21 +72,15 @@ GET _cat/nodes + -- -NOTE: Because <<_precedence_of_settings, transient -settings take precedence over persistent settings>>, this overrides the -persistent setting used to disable shard allocation in the first step. If you -don't explicitly reenable shard allocation after a full cluster restart, the -persistent setting is used and shard allocation remains disabled. - -Once the node has joined the cluster, reenable shard allocation to start using -the node: +Once the node has joined the cluster, remove the `cluster.routing.allocation.enable` +setting to enable shard allocation and start using the node: [source,js] -------------------------------------------------- PUT _cluster/settings { - "transient": { - "cluster.routing.allocation.enable": "all" + "persistent": { + "cluster.routing.allocation.enable": null } } -------------------------------------------------- From 038fe1151b413618eb5c9a733f243ef6abb6316c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 1 May 2018 09:39:36 -0400 Subject: [PATCH 48/68] TEST: Add debug log to FlushIT We still don't have a strong reason for the failures of testDoNotRenewSyncedFlushWhenAllSealed and testSyncedFlushSkipOutOfSyncReplicas. This commit adds debug logging for these two tests.
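For reference, the new debug line records the node, shard, and full cluster state before each synced-flush attempt; its shape is roughly the following (node and shard names here are made up):

```
Issue synced-flush on node [node_t0], shard [[test][0]], cluster state [...]
```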
--- .../elasticsearch/indices/flush/FlushIT.java | 20 +++++++++++-------- .../indices/flush/SyncedFlushUtil.java | 6 ++++-- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index a2149b9d28a..27e1c1af2bb 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -51,6 +51,7 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -117,7 +118,7 @@ public class FlushIT extends ESIntegTestCase { ShardsSyncedFlushResult result; if (randomBoolean()) { logger.info("--> sync flushing shard 0"); - result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId(index, 0)); + result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0)); } else { logger.info("--> sync flushing index [test]"); SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); @@ -246,11 +247,14 @@ public class FlushIT extends ESIntegTestCase { } private String syncedFlushDescription(ShardsSyncedFlushResult result) { - return result.shardResponses().entrySet().stream() + String detail = result.shardResponses().entrySet().stream() .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") .collect(Collectors.joining(",")); + return String.format(Locale.ROOT, "Total shards: [%d], failed: [%s], reason: [%s], detail: [%s]", + result.totalShards(), result.failed(), result.failureReason(), detail); } + @TestLogging("_root:DEBUG") public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -275,7 +279,7 @@ public class FlushIT extends ESIntegTestCase { for (int i = 0; i < extraDocs; i++) { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } - final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); @@ -287,7 +291,7 @@ public class FlushIT extends ESIntegTestCase { indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i); } } - final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } @@ -308,11 +312,11 @@ public class FlushIT extends ESIntegTestCase { for (int i = 0; i < numDocs; i++) { index("test", "doc", Integer.toString(i)); } - final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, 
internalCluster(), shardId); logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush - final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); @@ -321,7 +325,7 @@ public class FlushIT extends ESIntegTestCase { for (int i = 0; i < moreDocs; i++) { index("test", "doc", Integer.toString(i)); } - final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); @@ -337,7 +341,7 @@ public class FlushIT extends ESIntegTestCase { shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true)); assertThat(shard.commitStats().syncId(), nullValue()); } - final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index adaa612adb3..987f69b6587 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.indices.flush; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.InternalTestCluster; @@ -40,8 +40,10 @@ public class SyncedFlushUtil { /** * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ - public static ShardsSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, ShardId shardId) { + public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) { SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); + logger.debug("Issue synced-flush on node [{}], shard [{}], cluster state [{}]", + service.nodeName(), shardId, cluster.clusterService(service.nodeName()).state()); LatchedListener listener = new LatchedListener<>(); service.attemptSyncedFlush(shardId, listener); try { From d12e6442068e5ed64e1f948c1eab3783c16bf1c1 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 1 May 2018 11:23:18 -0400 Subject: [PATCH 49/68] Build: Log a warning if 
disabling reindex-from-old (#30304) We disable the reindex-from-old tests if we're running on Windows or in a directory that contains a space. This adds a warning to the logs when we do that so that you can tell that it happened. This will be nice to have when looking at CI and will be a hint to anyone developing locally. --- modules/reindex/build.gradle | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index c0289f7df72..a153cc555c8 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -83,9 +83,11 @@ dependencies { es090 'org.elasticsearch:elasticsearch:0.90.13@zip' } -if (Os.isFamily(Os.FAMILY_WINDOWS) || rootProject.rootDir.toString().contains(" ")) { - // We can't get the pid files in windows and old versions of Elasticsearch doesn't set up the CLASSPATH correctly if there are spaces - // in the path. In either case, we skip reindex-from-old. +if (Os.isFamily(Os.FAMILY_WINDOWS)) { + logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows") + integTestRunner.systemProperty "tests.fromOld", "false" +} else if (rootProject.rootDir.toString().contains(" ")) { + logger.warn("Disabling reindex-from-old tests because Elasticsearch 1.7 won't start with spaces in the path") + integTestRunner.systemProperty "tests.fromOld", "false" } else { integTestRunner.systemProperty "tests.fromOld", "true" From a324cd41f7a2a507bb3e78dcdcf946117159d11a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 1 May 2018 08:47:53 -0700 Subject: [PATCH 50/68] Build: Move gradle wrapper jar to a dot dir (#30146) This commit moves the gradle wrapper jar file to a hidden directory, so that it does not clutter the top level names seen when doing an `ls` in the project. The actual jar file is never manually edited, and only changed by running `./gradlew wrapper ...` so it is not important for this directory to be "visible".
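As a sketch, regenerating the wrapper after this change still goes through the usual task; the version number below is only an example:

```
./gradlew wrapper --gradle-version 4.7
```

The task now writes the jar to `.gradle-wrapper/gradle-wrapper.jar` via the `jarFile` setting shown in the diff below.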
--- .../gradle-wrapper.jar | Bin 54329 -> 54413 bytes .../gradle-wrapper.properties | 4 +-- build.gradle | 29 +++++++----------- gradlew | 2 +- gradlew.bat | 2 +- 5 files changed, 15 insertions(+), 22 deletions(-) rename {gradle/wrapper => .gradle-wrapper}/gradle-wrapper.jar (81%) rename {gradle/wrapper => .gradle-wrapper}/gradle-wrapper.properties (100%) diff --git a/gradle/wrapper/gradle-wrapper.jar b/.gradle-wrapper/gradle-wrapper.jar similarity index 81% rename from gradle/wrapper/gradle-wrapper.jar rename to .gradle-wrapper/gradle-wrapper.jar index a5fe1cb94b9ee5ce57e6113458225bcba12d83e3..91ca28c8b802289c3a438766657a5e98f20eff03 100644 GIT binary patch delta 7397 zcmY+JWmFVEyv3IV=@jV(>F!z@2@w$KmS%wkX_s20k!ES6J0t|8k&cC>kxuDOX?#B4 zJ1^$b@7_Ce=FH5Ong2AGqQ;b=#*3n*Sr_*uNE)JFxShG70OBcY>n*sk%p!xW6fgjQ zBRDM&7tC8npX5nn+ckXXnY>Y{cCI=kWN6%){zd#LVaB+K7p4${BAbJYEe{+=^g7o2 z__WmJ>zD&~hr|ATWSj$-S%Gb#e5Sm?p<;&4WZ3+X?4hqH!1wr#Ksqkbh>`eCw*T+> zdr4oIU5+5{&z^U6jHJ3~r{01DW22Zt>hf#9IBE|C7t*QCvC|Q0*{0hOPzIC%= zQ~86R@Nx1*(OD6DAAhX2>zDp?^3l(m3<+(5VctWD-UDZ}ZTc@y9Q*FiP_%Axap|1< z!cW)r{Ltu)8r7|NIH$V8}$P&4CK|U7pkjTPh7{DVY2-*0E<=ux<`7XB~s7k>OnJOT0zW_N)}E z+g1`<4W42#8BC;7L%Z2UF=_s>L?z*|w|n&6dGuDALeY@xl%#dE8tsB$VoFb#-B1$; z?l2EYZZ3fYzSNvjt^GTfLZ5Ck_rS zb=+}lXfza2Kg4%j^mWuVtR-m0-PGJ-<{|A1_w>f(M2+>%gZeS4Q=RPJtsOa~wu^M{;luOUT~@`NyIiV`8gW1%!)1FBg69GkONB9Yy5ArTOk<9HJqOLnS; z_ha%bf9_T|HUSg07;>0*`)&S?nJd$ zX35$0rU2z3=pctjJUdOtFihi$tv5`)U{}Il=z(>9A?GY3_6KH&9M;c_J`G%m=(d@~ zQT|Z%r%J9eROrRbV?3?ln9u_v;qBZ?{YznP790&%Huf27@X93tC6JT;OaECsKUiea zzsl(|B)p_fa=uWSn^5<*HOAGAd>a^5NZyd3eI-Dg=$i<;SI>EGT@Vi9(jt~1bqU#R)(|KcV?Vnrdv z>od_H7eYVBMcSVUI!l{l9kdDQIF4F^RZM2ct(;m%MiI7=QXq_GefpEOfWhE~h@&Qi zRz{RPm*_pzQ9)$~R-*KwZ_rHOcl@xjB5G}jfbd-AAJN0GJ{*^1n+xVki0+BFneb56 z_OQA3_N?{?Hz7?YG`|5^|5vRjWbL!d@@)$E4_73(1!$ zexzL2VHAi_;ULOV67s}an3{HylUZb&MWy2J)T{QZ?vY-?fi9f|8RN-eyeop^y0-dz#>hdNrDDENnCbbIPam0LCGW!8rk8}OVsk1$ zeMXB>B6zg+;t#aLLHu1m6=DSY@hk^^P(jT7X>k(@nj-`AW#tb6^2l9Q32_x>)@d%N ze{vs?DtgQbR>Vh4e+Ut4j@2UYV`daonL6yRRonz7?d$hAB>k9>C(#fyKz=7vb#s?& zPG?|(#U;du3%CZEfRtIJHF(Tk5}UsyGiOkK8cqJw9cUEg?;v%o$G%i?=bA&=FK0Ab z%73UIXQ?6;in>pMTS3N`Z`RLn#euKic?WCsA~;jZYU@HfIlFrBRi8CJn`NeUig`p%kI zm6yJA34!)1L5d2pz+PDrXItk)oVs2~Z+f|kpFsvkmdi%^V4p4n`6EW96&6If%`F<;#pM$`8!{M*kv2ftii=> z)VSk{*&GKY?4ac_K_Ccj$4-qst|QIVS$F$}BArRSw%hKRJ!u^NsR<9(TkcfEm;e}2 zR7T#HY*O^0A!mmjWUj}BIc{Rc_ABb4&7ea~aj6I<;O!O2av>lyJLnBXsa|sjwk3|? z<$7m#H=r0{o23~ut7i#a$>+(jRsO!X75JvKt`y39dJ!7nocO5$MItV<$dGL9$}gdt z>V5ShA%*FMWseR0UV;WF4Eq13E>ak{0|v#w<*yH^ADvR0B--JYm8-5H1K zVCtf~$hfY>fx)s;iX+Lc6=IOnik?}#FSTyZ0Y046r_yQ{q5cUUYA`aXv#IbZ82a)o=5&_lnAI}NuapU)+> zvraAS9lK`~t!C;2wqJmXeNw#MXd-l4gqXa}R^QZnbTOJK;;?@I@rm%&)XT!SNUM{(Lnx-WhRxPu7{fnlcuStU#%j%GVhvSQyi=Oa>s@|Bti%&f;g2~wYZL4OB}Mxk z+b?F2QRYl8SEH$^Gq|gKB#haIz z!t;iN69p}snp1u63`$`ywSPlVpl8E$$3d3G`1YD}`;<3C3`sekPxy0hxMmFyo{Al` zjUy|SyiSr2y5f=5&mgP6XF%K}*ytW8IoHpLli1}$X7!wnQbBvW;f3szBEF}rR? 
zY`y}KF6S!8;D}f}))rrPH$rXQNJnS4R74kLLc$Xlfi3HZv7(c;kW~xC!l%tIi8<%%fuw4h?eLli`gI6Kc1dj;G?dZU%Rmk>0 zlI*fs%~LpWeDKVqjhU}l1BezIGOxA@6Fy1dK%$X2@wi`9-Dk2rw!iAbE@yVE;^ikoLwA7OmE8`D`Eh`+;V~q8G+M=nR8w0D3rUpl=QIqW6cR#W zY;H=fZDK3v(JfVx;PyNoNGh$v?A6D?Ny}xo_u*btdiuJqOFV-}gTehtE+HTLD#vCi z(P;=v&BQ)l*JOM5u$++%C0KV4T1u0cFKqo1W9t@;MmdYl7p_>lNlO)kT)JCW9VtPHV=LvN7Cmu!15P~= zF$c_e{H7EUclHaLPVW}W^}+i_m3k%^T=!F`!E7jtgD#J*Vrk%!gD!bR`;?-cQw6^s zMs?OLY~y(CPr6ho2v{&%;&-<-BtQw`Ip1zs{2 zlJHij73LF3dT9AFz7*)QEA_3p!}WYQ0?UXmjQacL??6~-aXabU#l!MXHFclTo1S(a{$9+Ou=$cZDW!05dm|L@K!HX_WTu+?pi z$kqV%mHXt^bQ5$ho@BSoFJ?IYgt;hefLyzkEix$*J}QA5*Dk9&DFAaCBd*-o7VNTW zlvG=i*DqY|*|1z;)URPCs(YsgPe~S0Zam|}Y9`eqd2n!l{gFqusPv4n+&W+Fk)xbm zdzXM7H=IUdvNTq4Kthztjw5VUA;BmZ#T_a} zXfCQ{AXqeAM&}`GrRdZ)&TYsdzqA9lM`6MdV_3Bc&Q<-{go7O~E^ zB+1H5?E^JZhaahb98T-Q=;L^R_V?-q9=(nTHYUTIvVQs?(oce8pj#_agXmxlPa_bK zau%s)QijfkTYKXL^IMmNlo8?0wTk{5qF^^Za9AiYC4CJlE5;ez-EDZhq=WtV+9|=J zdrjxAPf6+VIY|9?-N@&u?um#2njz=;MmR!S(Qmtp^<%eV{n8&8ahAq!gPbab^ic`s zyJ0~wcc99P7n5uLIQeEmYkzWh5Vrh@;jC-W2ZwsPj>On| z(^XgybF&i7-OamAiBIwSlZlSR#Vdp}R)ZsekYoaci@43^_HDzOFN#Ygfn8V?j~7T|6QKR zmkHh^#w|>z0HL?8tL4 z#j>Jk@asSr=>&I8HHPo-uHK!5P$s$*!T6DZaAxro&F<|k)!b3vsAMfhs+KaHbFTqF z@E*H_x@c_yC1XKvMRO`cO0DhJz=Ysa#sj0v%Cb_Lfw z__Tj&^5blO$_^Vs9$%Defz%#eJ>{M>`+`?KO3+@yNN&l@ph=^p?3kpN6HX^mLL8;h znX}4v-_c1ZGNxK)$1vi4km%f%ejMkj0D~6P=zkXprF=)BTwIrrvDK197V+Vd)W1H7wH&?}1G!1${&? zji^*OwX5nDnXEZsh`aiRBDWb5%DiG%R*%%4a7zk2KHuRt{-opP?bG6RQ@_Jrjwlcc z1c?I{V!mStTm$P+uMqeQJ+G&A-Bogkd9n^ zow6ZV5GhtRsc+n5Vpm!gL~@IgZ%M6SEirNsUCuhF?$hN(;FV7cQV-0WmV+mE&j1UM z*5gsoF|(+ckJqjHl>N;;_7d$6)awK+so|DP85m4m6cAC}YjgzQewH$pvS#KLNV=gt zWpcapyxLARfGB_63|p6Ui?{UbnZJMi1E7pVZdJRUG0y0E8+C|I2lvjlSp5_*-9ppR z9Q?Y|hjxxf;g3l!AMqNq?7hENyaue$&EQ)6FbDWW#-+fS((exFbIWL=*R@bj_n6DF zGCb@vMge`!wA!G4&-nEB;*{lqQ0^)M{2B->b;pmtJ_V>dbMl(ZYwlESZKFt3Bd@t; zzC*F~yF5cZKTtpDuTE;>X#^O@JfP2J!20&Rf0;m2)H9@oGAu5WhS2UQ705k;s>y`Sd{YI7DBC zBW86YCiE2dyyhHO54}mazQYIr&G>9TXvbTK9y3uYjZq3E0q|CoB)hdt{WX#^W6{9b z&nmv5v0eF#uJwL#X3rXVGUQea5`B~e}uKLq6{Vekx6e!b8jq)3LJg-M=&0@ZdMnC*XKnKQY#v0E4a#+rkFQG4PF7MDnu$0{d@2)I(fA!1#X({jHj zKz%G%jhilKF2nq6|EC<~QQ!(T#~2+bAc{|YD0lVv+2a^p7)&X3?Y#g)mTJl)9^m8s z*)qZWHG$;QO3&fF5Pr+Eyk+V;JgzHaiSU4Mr<=IkOW*N3%Xx*Cn8({WVH}|VjK0f+x=;||J9}N#t+mw ze-&XLYz)9#H4ID`K0E~8)~)&PQ>%`k2OI7G{A4i>C{zExYTMuX+yA!D!tkzMlE1|_ zJb1|f2XI2=pR7{Bg%1rA!qEmPf!1pOEH!mJxP}@z+-Q&k=&b(F${dseX6XFGlR;&m zkKsR5A5sMBnfy0mz^jMEfL1neP8()8K7s?K8!nCj0ncpU%{GGY_$fko3xX2G4?csi z2Qon5av{`k<6${qqw_y(>mc~oXMUIxNcQ2Mb?<@;ry1b@(t7=sOmL?Wejv8@KfLmy zgfIO~a(wy2|{?vC?!fAymk}} ztd0Gb$0wk}UyaEEixU1@lEOR3IDoB5|C3fG5&at~GERxo1lJmW56nvcpD;H4Kl>BH zffJNKuP^^>xd}-iGW#DkWz+s0$^pd5|0@~cc$0jXlaLj2=bn>46Jlg*Nq_8n4 delta 7284 zcmZ9Rbxa&UyY*p_;_gLi^ZN&X(u!mC-t5&0d@(I3`~ z>g&k1R{I+5GW#A-oKIB>1A)^~-Oq=(*WfP?I(L<)0xdafj_D}7;G0~b^?;1qfOU~| zA@JjRLoc!^`AaK?!C5=T+tdwdZW3+HI!Eg~|&kA&%j4 zH8O0c!i;t1Lp(C7MFM+%LyogNXsuadN2Fyb2Ud*z;7Yx;o2o@li+Q=%gglEh(ufnz zOeeviKO}^?w3wSOFs+m2>c)DbRNRU`5b{ zmG`u8s{;YS0xEZ@V=ix$0cr80GtefV%c(iqw<|k$uaX5t?8i0XBJ1kyy++_ExxO+o zv9o1^sfa7POFF%^-hfU=#DjBK?c01|t6he(PZfUkguaF&RIL+mJjpn217qQMH3s2NWMDfJ(rn`c$1zC zU{Q*=eMz`A%}9_62iX*+Y$c8l}ZAMvpiizfC8 zAYPw84{QpUA$k zux+5Bj$^I7f)(%kozy`1X(Vlg|J<>kzJtdhY;53q4$1W%JW}T#?ZM!7lEd8PJtUAw z^1_R6v3+TsW#r59=z5a==#b;Y>+WSkZu6u1sx-N=UD{=udk87lL{BZSGed7pO(j(Hv?>cF!DjUaJOz|=~ zIsAzGkuC#`&PLu)7O9(*CV3;OC$>Snu>~`i*Y!q&c;W&)79vHGmFm2YqQY}M$J4bZ zLmKYZKbaD<_x5QI5h0qCyMcg~Q*BfC#2n{l^zikMm*-2KOai*q0uAId-Y7767l1yG zhx4vd!f=NzVVeQ?ZK|j?M6gFmxf7E?y#&1 zYtGBx7}oi<-JSAxjE({~q5xLVhna&${) zmbCtgIo+q>x7yHHP4}}JS(W*B+1hnO*<0D#BFPxefF?hzHX|&yE|XxQo^V@B>>sm2 
ztKyv}v??5=Hjv4H+z?x4{h^8Xcf%vp1gDIks+$=nnCJfg_l4Fgq zmT0~QQaA|R(0x=nrY&%{oTCSWsH4h9i}$vv>3w@G9o{IF zH!l-e3Uw0=wHX)nCcRHe{3^p#o)X@X_FHZJ<6^HiLZ8YqEf5|rbF0^npZayJi;Ff6 zfyXt04K4@%4KBb_h_$n`bBfg0?F<*|-ksW4kKW5b@U@Gon34?oeF@IBn`22k9OX&n+qq*rz!@D zO7BjCe6^EoqI|I&&EqiihS_L#%$FUp zHH5}E+Yx9SDWq}IyagEwuXEHK&^>qDTTfDm2%U_qA^tYug(y$r^ky23c+NEHMxFMC z7L7iZF-Z-y+&CM#s8OFOmwcOVZocvij0R!<_EE~1g>sTcIO@lT7V*KoOXLWH_pr#3 zH{cRcfe)Ibo0HwL8|VbiX&(%vVF_&!m027hU;k+Ra2FmEf^EtRwuZWF-u>!ihN1;97h_h%IcYdO z1{mQ3T*WVT&dT|1tT)3LYfQ8`eYtZ1?&vn65$`bEV9AR9;1oZCS;Jn<2$f3#8m>2X zdTY)PQ(k{!BNyPU{|ReyGrCN;{!9+}!AZz#RD3#DQ{va%(mB*R;k$=c?&cNaJGZ!X zV=a~)%C5tOR2(yM>z&GQNME|ukEJ$c1M_wg2u#*E@U@EpRbWDZq|qCp)di zd^+b{e6r3~l~t!Ub8HFbqJFYTw|(;CvE#|xsOlrSMUC>#%mXU5P09qwRvmfrsI&3o z>cDIgdD8MZfwlzj{^O2HqqpxMEMvUC7~Tr}Is8aY##P4Yuc(yKYkSa~4vM$a0D&5& z$v}7Zo&ujvto7XuS*FQb&~gbk3|;g|iz2)K$a~i0x}0QklT8;Q2tf1Q8%cH5CT+D~loY3u8~NE@v55B)^)Q38RZi z&4Vu<@bl`AJ!m~vK9f%#kgVv!ycZm=JgN(~{GkQpIn?OSmxGR|B;i^hPW`Y7b&*9z zoQfS5)YY$YCXw)0{Pq#i0gr2jbT4Ru!JJDhfl0H8)~s&Q_q(z)*Va--;pc`@PZk1Y z`W!F4EMl(W$Zb`~_<%-cCB5cU5Ip#t(D5|XJRROv_17-x9WG63POORb`*WfErqttT zLpo=JKvEMy@8LQ7?NVgYWO^idOZQ;JDV$_4z2}s@@Kj(&9 zE7AgU!&ZGs1@vfbJFIt=q5 zzkFl00|dx}f7*kcWLd#H2golwU;TjWK_sodoTKszMXY*`suww4wYoo5w(wlGQ>ARp zv#!5Ot2g~i<%L(b{07J9NQPE#zKhyDO8`Rao&^h^OA*dv^_a<4gLH%=ChjDggFTCS zDN{hnn83LJ&i7^fW^+QBeWVB%?R%nLt(}!S?=;BiZ;@pEZU^LuQ;qwaC$_ZH-5%!k zfm=n7g0b5;WbDc(>YgwoQ{VGQ&?>)$+&cpvqk#I7q;w-D+sioUz1gmrWWY6fGUvn_$-myR zn&04MfXWJcev1D^uZAk?O;WaJKZ zvHEdl2xiisa~H)7C+sLuYviE&k462{HBW%kx3NFDv?!*Ys2*V=>dbk3ky7Vao-^ja+Hp88l|2@efD0n4;_a0vBeWp*$Z| z`tYEx!n9L=TsfCYMx0`X33gFl=5Sl8aAPDt(AR8M9)n(RzZ9J%YQ3O-sk%_TXN$jX zw<;WSxts<0A_o9n! zvs(rV`71n~g|s6X%)C9okB|V}3j}-``?9l=Y>?Q)Sa?Hun4Bbk-L}82Ix3y-wVJWR zvoNMnlFrLN=JA4Rj8bSni0eWB9K%pn;(}QIDLd6`IA3EET)mi3K{}rR?SQ{m{(*{A zGU%;>MS2~{GK;fo2|?%H`(I|ZOpX0=bCO1>4$u!!5~EGsSE}daY*P;t_FGdW z_Fc`$qDy7ZGE&DdM@y5lMmrcL+VCABIh4?p$i&WxzPgpopSW*I`o=)=c07ejN}c~a zO*|_OHF;fUQ^D}1-Q(Zt{gMXqEaM!tx`|_W3fe6T4aOJsI@S7%UIVXElusC-yH=b< z*zDvM@$p>VP+jeiIPyK~S5d)JF8ju^IY=ZroSg<{g~;|g9bY_0o* z49V<+Dn{_#Qr*>cs;U^M1mIM95f9l$QkEkrdharM%QgAmt-+eEg-5dSL#Jnulu(zEKo^EwmL>`9%6+)7+U=KX z9(nMr8uR>#nq@hJGqnnrvcM()%F*Y3-S|ePyRjWyE`yE7i1x{hYS}{+%v4Do|I3SV z1f@c5&zbe0bNw=w$uPw!H2Ib%A|C;zie0RO-;5Mfoi1_$C6!xde$hXWVJ-o_@5}roZs=aD-OA_141}({G;)*2Qq}_d1cxOWv}0wQMk+cu z^J#ZuMKzU@Q;WVFNnx#*gSgA!&F=U;%ZW8xOL3i2~TIm{_z1V zFzp){KWhn$U*(=FkUSpst6}!XEZrPMIS$abp7pc>)nQiIk=;LZc*{!Tlf<9XiqLFB zuyQ?WA;eISQ$bUIyg$U%Sr)tlM|d;8uvfW1tL~oueuP`CKDaiGH>{fa9w0x($KDp7 z%W@#siHZl=nIpZz>?8%fhlM+b1P1CF$hFN;&Nw1pbwf}3Ydd(My)kWYf#@~Qx?q9D z-`$6Y1E2QM&9;Ar+C>4oy_3q@(y?dat=G9y)%z98d^7GUM{st0u`YUzoFot|QUAAWnKJSNA^pEMS+M7UeBh1l zpzNbBQ5OSagGkSwhv|*9MwhLBur7K_T*A|Wn5p>3?X$yg0MToT__i#CO_=Q6NxL)N z*{Z~SY9jld_F+P_p6sNs5aE6ELwacEzD6hF3=bSE)1n`X$(Kue9i47x77R;0DR~4k zDMD-}{Mf|c=xEFv52gH2G?WXCV2L*awMeO@E*24Qd5f<&1K(I1q1L}SBl_yW5SY>X zM!<^v4%d$tGqZw=R?UGu%*Z2$rJ!`4L_C{c;nDzI@o8GkuBM@QhZBBnN1jtZ1k@}b zW+>#hbW5+bqedp#%L=Dw3r$)^w)KG*7lVM_kYgb92$k5iAYgd^!s@3L((Q<(DjAVF zMZKy{AS~U(@oV-vtO$XrOn(LRe=fR^t!AkqDLf>DlobZ`<-tgTb=%8KlQ*M{f6X1r zk8Gwdu8yTIN-l9!FKmm;DIuyVJxAFsyG~4b<+>x07|Y&Aj{u{;aTcF@qCl_)8z66P zTwKkmgT)D(e31}G<>^HaYC|PS`N;QyjC}3Vd=<~9q4ZQaNqi;bN zkJhcyXV#To=m8!qX){hJM=LaEbr4VfK{B*6yuxRAMuiGBAu%QS#vn4+m)x*U(zAox z^K_PeD}vgOk6bABn?1Slx2yTm`}ZLhKi_Vig8lkFRy>KfZx_8~x$A*Q07W9o9 z-Gir$$0x%)RUCekRy$o2)`lLT9sU-)Yv{EKCR=@KCA-J07r9jtCPY;sErsxLYFqSy zpRa`6c&VNj7E~Q>XAh<9oheJb>bN1#aCs7v_)4pLc*@?EQ&+qPN2_w=%C)pPU z`qG1bHY0&$@%oV_A8o70y;+Pe*M6Yqz<13#?yku5Wb}o`ow?(564Msn(j|Voblb)y 
zb|hBE{poM|>~Gfe*mNKoObWrCHJ1s*+--P2(MHqnkF%#k*V`bkd@4IA8qKtXEsQhun;iPD7L>yhJ6ke)XRkB}778=2oy^9^2e3 zJ#NlJ8DE3^`Vs~_&n-%CH4QYuQvMRM?o@?_ukh<3%KLsnYE>|n$%zJ-W^_wDDeco>i?Zrz?xQ(wY&{+Pf&Ui42owzveuMd`stT-H7EU&5ERaH&#e z0(CveCSFA9SD?iGTpHL|@f>w9Q3-0_h4h zpYBigduDhd=e4<>eUq|S} z{#`y z02v-yBI&;8gyghq(jl-vqS3YSvt_En#Tw`XN)VGp~NL=Whi+2*1ihlybmgwR9gCQFuzY zZz!)yx6?V~`Y_65uc;|1I~r6m`c?iMOi{S-7o1FWHLs%9x?(+c$M^k$ zC7NN3nW?iyc^q7-AWup4dqAAGsw(mBUg2A275Mznkl@sm-k@l@@U22}sGpjs>j z&=CJu+sS)Gp5p&Taevde{|<6K=*BQ1^khT;5UB7EMZfU_aFqUG!ADGJ`nUH0vXB4d z={GTe9Oxg8gD9bNg0+@7!_9at58^)#JEuiP)+JFI - wrapper.distributionType = DistributionType.ALL - - wrapper.doLast { - final DistributionLocator locator = new DistributionLocator() - final GradleVersion version = GradleVersion.version(wrapper.gradleVersion) - final URI distributionUri = locator.getDistributionFor(version, wrapper.distributionType.name().toLowerCase(Locale.ENGLISH)) - final URI sha256Uri = new URI(distributionUri.toString() + ".sha256") - final String sha256Sum = new String(sha256Uri.toURL().bytes) - wrapper.getPropertiesFile() << "distributionSha256Sum=${sha256Sum}\n" - } - } +task wrapper(type: Wrapper) { + distributionType = DistributionType.ALL + jarFile = file('.gradle-wrapper/gradle-wrapper.jar') + doLast { + final DistributionLocator locator = new DistributionLocator() + final GradleVersion version = GradleVersion.version(gradleVersion) + final URI distributionUri = locator.getDistributionFor(version, distributionType.name().toLowerCase(Locale.ENGLISH)) + final URI sha256Uri = new URI(distributionUri.toString() + ".sha256") + final String sha256Sum = new String(sha256Uri.toURL().bytes) + final String existingProperties = getPropertiesFile().getText('UTF-8') + getPropertiesFile().setText("${existingProperties}distributionSha256Sum=${sha256Sum}\n", 'UTF-8') } - } static void assertLinesInFile(final Path path, final List expectedLines) { diff --git a/gradlew b/gradlew index cccdd3d517f..1c1e5df9de9 100755 --- a/gradlew +++ b/gradlew @@ -64,7 +64,7 @@ case "`uname`" in ;; esac -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar +CLASSPATH=$APP_HOME/.gradle-wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then diff --git a/gradlew.bat b/gradlew.bat index e95643d6a2c..ec246509418 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -63,7 +63,7 @@ set CMD_LINE_ARGS=%* :execute @rem Setup the command line -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar +set CLASSPATH=%APP_HOME%\.gradle-wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% From b34e5cf21684a38725ba7825a7b21505d97bb825 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 08:52:22 -0700 Subject: [PATCH 51/68] [DOCS] Fixes users command name (#30275) --- x-pack/docs/en/security/troubleshooting.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc index 6b8e8841496..e805ed07a7d 100644 --- a/x-pack/docs/en/security/troubleshooting.asciidoc +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -64,15 +64,15 @@ the users. Any unknown roles are marked with `*`. 
-- [source, shell] ------------------------------------------ -bin/xpack/users list +bin/elasticsearch-users list rdeniro : admin alpacino : power_user jacknich : monitoring,unknown_role* <1> ------------------------------------------ <1> `unknown_role` was not found in `roles.yml` -For more information about this command, see -{ref}/users-command.html[Users Command]. +For more information about this command, see the +{ref}/users-command.html[`elasticsearch-users` command]. -- . If you are authenticating to LDAP, a number of configuration options can cause From 846783c2b6ed9e0cdde09030abfcb4369e649f14 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 08:55:35 -0700 Subject: [PATCH 52/68] [DOCS] Removes redundant file realm settings (#30192) --- .../authentication/file-realm.asciidoc | 27 +------------------ .../en/settings/security-settings.asciidoc | 4 +++ 2 files changed, 5 insertions(+), 26 deletions(-) diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc index 507baaf1f1f..000e204f67f 100644 --- a/x-pack/docs/en/security/authentication/file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc @@ -57,32 +57,7 @@ xpack: [[file-realm-settings]] ===== File Realm Settings -[cols="4,^3,10"] -|======================= -| Setting | Required | Description -| `type` | yes | Indicates the realm type. Must be set to `file`. -| `order` | no | Indicates the priority of this realm within the - realm chain. Realms with a lower order are - consulted first. Although not required, we - recommend explicitly setting this value when you - configure multiple realms. Defaults to - `Integer.MAX_VALUE`. -| `enabled` | no | Indicates whether this realm is enabled or - disabled. Enables you to disable a realm without - removing its configuration. Defaults to `true`. -| `cache.ttl` | no | Specifies the time-to-live for cached user entries. - A user's credentials are cached for this period of - time. Specify the time period using the standard - Elasticsearch {ref}/common-options.html#time-units[time units]. - Defaults to `20m`. -| `cache.max_users` | no | Specifies the maximum number of user entries that - can be stored in the cache at one time. Defaults - to 100,000. -| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for - the cached user credentials. See <> for the possible values. - (Expert Setting). -|======================= +See {ref}/security-settings.html#ref-users-settings[File Realm Settings]. ==== A Look Under the Hood diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc index 139e54467b7..ba281b5ccb4 100644 --- a/x-pack/docs/en/settings/security-settings.asciidoc +++ b/x-pack/docs/en/settings/security-settings.asciidoc @@ -168,6 +168,10 @@ in-memory cached user credentials. For possible values, see [float] ===== File realm settings +The `type` setting must be set to `file`. In addition to the +<>, you can specify +the following settings: + `cache.ttl`:: The time-to-live for cached user entries. A user and a hash of its credentials are cached for this configured period of time. Defaults to `20m`. 
Specify values From 7933f5e28eeb378a9da047c71433f7dbb2b0e33e Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 09:15:13 -0700 Subject: [PATCH 53/68] [DOCS] Adds Active Directory realm configuration details (#30223) --- .../active-directory-realm.asciidoc | 262 +----------------- ...onfiguring-active-directory-realm.asciidoc | 248 +++++++++++++++++ .../docs/en/security/configuring-es.asciidoc | 4 + .../securing-elasticsearch.asciidoc | 4 + .../securing-communications/tls-ad.asciidoc | 57 ++++ 5 files changed, 323 insertions(+), 252 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc create mode 100644 x-pack/docs/en/security/securing-communications/tls-ad.asciidoc diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc index 143156ca636..d76f6978688 100644 --- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -6,14 +6,7 @@ users. To integrate with Active Directory, you configure an `active_directory` realm and map Active Directory users and groups to {security} roles in the <>. -To protect passwords, communications between Elasticsearch and the Active Directory -server should be encrypted using SSL/TLS. Clients and nodes that connect via -SSL/TLS to the Active Directory server need to have the Active Directory server's -certificate or the server's root CA certificate installed in their keystore or -truststore. For more information about installing certificates, see -<>. - -==== Configuring an Active Directory Realm +See {ref}/configuring-ad-realm.html[Configuring an Active Directory Realm]. {security} uses LDAP to communicate with Active Directory, so `active_directory` realms are similar to <>. Like LDAP directories, @@ -39,132 +32,6 @@ Active Directory. Once the user has been found, the Active Directory realm then retrieves the user's group memberships from the `tokenGroups` attribute on the user's entry in Active Directory. -To configure an `active_directory` realm: - -. Add a realm configuration of type `active_directory` to `elasticsearch.yml` -under the `xpack.security.authc.realms` namespace. At a minimum, you must set the realm -`type` to `active_directory` and specify the Active Directory `domain_name`. To -use SSL/TLS for secured communication with the Active Directory server, you must -also set the `url` attribute and specify the `ldaps` protocol and secure port -number. If you are configuring multiple realms, you should also explicitly set -the `order` attribute to control the order in which the realms are consulted -during authentication. See <> -for all of the options you can set for an `active_directory` realm. -+ -NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS. - If DNS is not being provided by a Windows DNS server, add a mapping for - the domain in the local `/etc/hosts` file. -+ -For example, the following realm configuration configures {security} to connect -to `ldaps://example.com:636` to authenticate users through Active Directory. 
-+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - active_directory: - type: active_directory - order: 0 <1> - domain_name: ad.example.com - url: ldaps://ad.example.com:636 <2> ------------------------------------------------------------- -<1> The realm order controls the order in which the configured realms are checked - when authenticating a user. -<2> If you don't specify the URL, it defaults to `ldap::389`. -+ -IMPORTANT: When you configure realms in `elasticsearch.yml`, only the -realms you specify are used for authentication. If you also want to use the -`native` or `file` realms, you must include them in the realm chain. - -. Restart Elasticsearch. - -===== Configuring a Bind User -By default, all of the LDAP operations are run by the user that {security} is -authenticating. In some cases, regular users may not be able to access all of the -necessary items within Active Directory and a _bind user_ is needed. A bind user -can be configured and will be used to perform all operations other than the LDAP -bind request, which is required to authenticate the credentials provided by the user. - -The use of a bind user enables the <> to be -used with the Active Directory realm and the ability to maintain a set of pooled -connections to Active Directory. These pooled connection reduce the number of -resources that must be created and destroyed with every user authentication. - -The following example shows the configuration of a bind user through the user of the -`bind_dn` and `secure_bind_password` settings. - -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - active_directory: - type: active_directory - order: 0 - domain_name: ad.example.com - url: ldaps://ad.example.com:636 - bind_dn: es_svc_user@ad.example.com <1> ------------------------------------------------------------- -<1> This is the user that all Active Directory search requests are executed as. - Without a bind user configured, all requests run as the user that is authenticating - with Elasticsearch. - -The password for the `bind_dn` user should be configured by adding the appropriate -`secure_bind_password` setting to the {es} keystore. -For example, the following command adds the password for the example realm above: - -[source, shell] ------------------------------------------------------------- -bin/elasticsearch-keystore add xpack.security.authc.realms.active_directory.secure_bind_password ------------------------------------------------------------- - -When a bind user is configured, connection pooling is enabled by default. -Connection pooling can be disabled using the `user_search.pool.enabled` setting. - -===== Multiple Domain Support -When authenticating users across multiple domains in a forest, there are a few minor -differences in the configuration and the way that users will authenticate. The `domain_name` -setting should be set to the forest root domain name. The `url` setting also needs to -be set as you will need to authenticate against the Global Catalog, which uses a different -port and may not be running on every Domain Controller. - -For example, the following realm configuration configures {security} to connect to specific -Domain Controllers on the Global Catalog port with the domain name set to the forest root. 
- -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - active_directory: - type: active_directory - order: 0 - domain_name: example.com <1> - url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2> - load_balance: - type: "round_robin" <3> ------------------------------------------------------------- -<1> The `domain_name` is set to the name of the root domain in the forest. -<2> The `url` value used in this example has URLs for two different Domain Controllers, -which are also Global Catalog servers. Port 3268 is the default port for unencrypted -communication with the Global Catalog; port 3269 is the default port for SSL connections. -The servers that are being connected to can be in any domain of the forest as long as -they are also Global Catalog servers. -<3> A load balancing setting is provided to indicate the desired behavior when choosing -the server to connect to. - -In this configuration, users will need to use either their full User Principal -Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of -the username with `@ - } }, - "enabled": true -} --------------------------------------------------- -// CONSOLE -<1> The Active Directory distinguished name (DN) of the `admins` group. - -[source,js] --------------------------------------------------- -PUT _xpack/security/role_mapping/basic_users -{ - "roles" : [ "user" ], - "rules" : { "any": [ - { "field" : { - "groups" : "cn=users,dc=example,dc=com" <1> - } }, - { "field" : { - "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2> - } } - ] }, - "enabled": true -} --------------------------------------------------- -// CONSOLE -<1> The Active Directory distinguished name (DN) of the `users` group. -<2> The Active Directory distinguished name (DN) of the user `John Doe`. - -Or, alternatively, configured via the role-mapping file: -[source, yaml] ------------------------------------------------------------- -monitoring: <1> - - "cn=admins,dc=example,dc=com" <2> -user: - - "cn=users,dc=example,dc=com" <3> - - "cn=admins,dc=example,dc=com" - - "cn=John Doe,cn=contractors,dc=example,dc=com" <4> ------------------------------------------------------------- -<1> The name of the role. -<2> The Active Directory distinguished name (DN) of the `admins` group. -<3> The Active Directory distinguished name (DN) of the `users` group. -<4> The Active Directory distinguished name (DN) of the user `John Doe`. - -For more information, see <>. +See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm]. [[ad-user-metadata]] ==== User Metadata in Active Directory Realms + When a user is authenticated via an Active Directory realm, the following -properties are populated in the user's _metadata_. This metadata is returned in the -{ref}/security-api-authenticate.html[authenticate API], and can be used with -<> in roles. +properties are populated in the user's _metadata_: |======================= | Field | Description @@ -270,51 +64,15 @@ properties are populated in the user's _metadata_. This metadata is returned in groups were mapped to a role). |======================= +This metadata is returned in the +{ref}/security-api-authenticate.html[authenticate API] and can be used with +<> in roles. + Additional metadata can be extracted from the Active Directory server by configuring the `metadata` setting on the Active Directory realm. 
[[active-directory-ssl]] ==== Setting up SSL Between Elasticsearch and Active Directory -To protect the user credentials that are sent for authentication, it's highly -recommended to encrypt communications between Elasticsearch and your Active -Directory server. Connecting via SSL/TLS ensures that the identity of the Active -Directory server is authenticated before {security} transmits the user -credentials, and the usernames and passwords are encrypted in transit. - -To encrypt communications between Elasticsearch and Active Directory: - -. Configure each node to trust certificates signed by the CA that signed your -Active Directory server certificates. The following example demonstrates how to trust a CA certificate, -`cacert.pem`, located within the {xpack} configuration directory: -+ -[source,shell] --------------------------------------------------- -xpack: - security: - authc: - realms: - active_directory: - type: active_directory - order: 0 - domain_name: ad.example.com - url: ldaps://ad.example.com:636 - ssl: - certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] --------------------------------------------------- -+ -The CA cert must be a PEM encoded certificate. - -. Set the `url` attribute in the realm configuration to specify the LDAPS protocol -and the secure port number. For example, `url: ldaps://ad.example.com:636`. - -. Restart Elasticsearch. - -NOTE: By default, when you configure {security} to connect to Active Directory - using SSL/TLS, {security} attempts to verify the hostname or IP address - specified with the `url` attribute in the realm configuration with the - values in the certificate. If the values in the certificate and realm - configuration do not match, {security} does not allow a connection to the - Active Directory server. This is done to protect against man-in-the-middle - attacks. If necessary, you can disable this behavior by setting the - {ref}/security-settings.html#ssl-tls-settings[`ssl.verification_mode`] property to `certificate`. +See +{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory]. \ No newline at end of file diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc new file mode 100644 index 00000000000..6298bb8ef9f --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -0,0 +1,248 @@ +[role="xpack"] +[[configuring-ad-realm]] +=== Configuring an Active Directory realm + +You can configure {security} to communicate with Active Directory to authenticate +users. To integrate with Active Directory, you configure an `active_directory` +realm and map Active Directory users and groups to {security} roles in the role +mapping file. + +For more information about Active Directory realms, see +{xpack-ref}/active-directory-realm.html[Active Directory User Authentication]. + +. Add a realm configuration of type `active_directory` to `elasticsearch.yml` +under the `xpack.security.authc.realms` namespace. At a minimum, you must set +the realm `type` to `active_directory` and specify the Active Directory +`domain_name`. If you are configuring multiple realms, you should also +explicitly set the `order` attribute to control the order in which the realms +are consulted during authentication. ++ +-- +See <> for all of the options you can set for an +`active_directory` realm. 
+ +NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS. + If DNS is not being provided by a Windows DNS server, add a mapping for + the domain in the local `/etc/hosts` file. + +For example, the following realm configuration configures {security} to connect +to `ldaps://example.com:636` to authenticate users through Active Directory: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 <1> + domain_name: ad.example.com + url: ldaps://ad.example.com:636 <2> +------------------------------------------------------------ +<1> The realm order controls the order in which the configured realms are checked + when authenticating a user. +<2> If you don't specify the URL, it defaults to `ldap::389`. + +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. +-- + +. If you are authenticating users across multiple domains in a forest, extra +steps are required. There are a few minor differences in the configuration and +the way that users authenticate. ++ +-- +Set the `domain_name` setting to the forest root domain name. + +You must also set the `url` setting, since you must authenticate against the +Global Catalog, which uses a different port and might not be running on every +Domain Controller. + +For example, the following realm configuration configures {security} to connect +to specific Domain Controllers on the Global Catalog port with the domain name +set to the forest root: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: example.com <1> + url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2> + load_balance: + type: "round_robin" <3> +------------------------------------------------------------ +<1> The `domain_name` is set to the name of the root domain in the forest. +<2> The `url` value used in this example has URLs for two different Domain Controllers, +which are also Global Catalog servers. Port 3268 is the default port for unencrypted +communication with the Global Catalog; port 3269 is the default port for SSL connections. +The servers that are being connected to can be in any domain of the forest as long as +they are also Global Catalog servers. +<3> A load balancing setting is provided to indicate the desired behavior when choosing +the server to connect to. + +In this configuration, users will need to use either their full User Principal +Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of +the username with `@>. +-- + +. (Optional) To protect passwords, +<>. + +. Restart {es}. + +. Configure a bind user. ++ +-- +The Active Directory realm authenticates users using an LDAP bind request. By +default, all of the LDAP operations are run by the user that {security} is +authenticating. In some cases, regular users may not be able to access all of the +necessary items within Active Directory and a _bind user_ is needed. A bind user +can be configured and is used to perform all operations other than the LDAP bind +request, which is required to authenticate the credentials provided by the user. 
+ +The use of a bind user enables the +{xpack-ref}/run-as-privilege.html[run as feature] to be used with the Active +Directory realm and the ability to maintain a set of pooled connections to +Active Directory. These pooled connections reduce the number of resources that +must be created and destroyed with every user authentication. + +The following example shows the configuration of a bind user through the use of +the `bind_dn` and `secure_bind_password` settings: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + bind_dn: es_svc_user@ad.example.com <1> +------------------------------------------------------------ +<1> This is the user that all Active Directory search requests are executed as. + Without a bind user configured, all requests run as the user that is authenticating + with {es}. + +The password for the `bind_dn` user should be configured by adding the +appropriate `secure_bind_password` setting to the {es} keystore. For example, +the following command adds the password for the example realm above: + +[source, shell] +------------------------------------------------------------ +bin/elasticsearch-keystore add \ +xpack.security.authc.realms.active_directory.secure_bind_password +------------------------------------------------------------ + +When a bind user is configured, connection pooling is enabled by default. +Connection pooling can be disabled using the `user_search.pool.enabled` setting. +-- + +. Map Active Directory users and groups to roles. ++ +-- +An integral part of a realm authentication process is to resolve the roles +associated with the authenticated user. Roles define the privileges a user has +in the cluster. + +Since with the `active_directory` realm the users are managed externally in the +Active Directory server, the expectation is that their roles are managed there +as well. In fact, Active Directory supports the notion of groups, which often +represent user roles for different systems in the organization. + +The `active_directory` realm enables you to map Active Directory users to roles +via their Active Directory groups or other metadata. This role mapping can be +configured via the <> or by using +a file stored on each node. When a user authenticates against an Active +Directory realm, the privileges for that user are the union of all privileges +defined by the roles to which the user is mapped. + +Within a mapping definition, you specify groups using their distinguished +names. For example, the following mapping configuration maps the Active +Directory `admins` group to both the `monitoring` and `user` roles, maps the +`users` group to the `user` role and maps the `John Doe` user to the `user` +role. + +Configured via the role-mapping API: +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/admins +{ + "roles" : [ "monitoring" , "user" ], + "rules" : { "field" : { + "groups" : "cn=admins,dc=example,dc=com" <1> + } }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> The Active Directory distinguished name (DN) of the `admins` group.
+ +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/basic_users +{ + "roles" : [ "user" ], + "rules" : { "any": [ + { "field" : { + "groups" : "cn=users,dc=example,dc=com" <1> + } }, + { "field" : { + "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2> + } } + ] }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> The Active Directory distinguished name (DN) of the `users` group. +<2> The Active Directory distinguished name (DN) of the user `John Doe`. + +Or, alternatively, configured via the role-mapping file: +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=users,dc=example,dc=com" <3> + - "cn=admins,dc=example,dc=com" + - "cn=John Doe,cn=contractors,dc=example,dc=com" <4> +------------------------------------------------------------ +<1> The name of the role. +<2> The Active Directory distinguished name (DN) of the `admins` group. +<3> The Active Directory distinguished name (DN) of the `users` group. +<4> The Active Directory distinguished name (DN) of the user `John Doe`. + +For more information, see +{xpack-ref}/mapping-roles.html[Mapping users and groups to roles]. +-- + +. (Optional) Configure the `metadata` setting in the Active Directory realm to +include extra properties in the user's metadata. ++ +-- +By default, `ldap_dn` and `ldap_groups` are populated in the user's metadata. +For more information, see +{xpack-ref}/active-directory-realm.html#ad-user-metadata[User Metadata in Active Directory Realms]. +-- diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index aab00fb225f..3efc682216f 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -70,6 +70,9 @@ user API. -- +. Choose which types of realms you want to use to authenticate users. +** <>. + . Set up roles and users to control access to {es}. For example, to grant _John Doe_ full access to all indices that match the pattern `events*` and enable him to create visualizations and dashboards @@ -128,5 +131,6 @@ include::securing-communications/securing-elasticsearch.asciidoc[] include::securing-communications/configuring-tls-docker.asciidoc[] include::securing-communications/enabling-cipher-suites.asciidoc[] include::securing-communications/separating-node-client-traffic.asciidoc[] +include::authentication/configuring-active-directory-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc index b100567edf8..e5c1187264f 100644 --- a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc +++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc @@ -20,9 +20,13 @@ information, see <>. .. Required: <>. .. Recommended: <>. +. If you are using Active Directory user authentication, +<>. + For more information about encrypting communications across the Elastic Stack, see {xpack-ref}/encrypting-communications.html[Encrypting Communications]. 
include::node-certificates.asciidoc[] include::tls-transport.asciidoc[] include::tls-http.asciidoc[] +include::tls-ad.asciidoc[] diff --git a/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc new file mode 100644 index 00000000000..d189501f1e2 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc @@ -0,0 +1,57 @@ +[role="xpack"] +[[tls-active-directory]] +==== Encrypting communications between {es} and Active Directory + +To protect the user credentials that are sent for authentication, it's highly +recommended to encrypt communications between {es} and your Active Directory +server. Connecting via SSL/TLS ensures that the identity of the Active Directory +server is authenticated before {security} transmits the user credentials and the +usernames and passwords are encrypted in transit. + +Clients and nodes that connect via SSL/TLS to the Active Directory server need +to have the Active Directory server's certificate or the server's root CA +certificate installed in their keystore or truststore. + +. Create the realm configuration for the `xpack.security.authc.realms` namespace +in the `elasticsearch.yml` file. See <>. + +. Set the `url` attribute in the realm configuration to specify the LDAPS protocol +and the secure port number. For example, `url: ldaps://ad.example.com:636`. + +. Configure each node to trust certificates signed by the certificate authority +(CA) that signed your Active Directory server certificates. ++ +-- +The following example demonstrates how to trust a CA certificate (`cacert.pem`), +which is located within the configuration directory: + +[source,shell] +-------------------------------------------------- +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + ssl: + certificate_authorities: [ "CONFIG_DIR/cacert.pem" ] +-------------------------------------------------- + +The CA cert must be a PEM encoded certificate. + +For more information about these settings, see <>. +-- + +. Restart {es}. + +NOTE: By default, when you configure {security} to connect to Active Directory + using SSL/TLS, {security} attempts to verify the hostname or IP address + specified with the `url` attribute in the realm configuration with the + values in the certificate. If the values in the certificate and realm + configuration do not match, {security} does not allow a connection to the + Active Directory server. This is done to protect against man-in-the-middle + attacks. If necessary, you can disable this behavior by setting the + `ssl.verification_mode` property to `certificate`. From abe797bd4207db35ee79d079e661a760af9d2436 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 1 May 2018 13:17:24 -0400 Subject: [PATCH 54/68] SQL: Teach the CLI to ignore empty commands (#30265) Cause the CLI to ignore commands that are empty or consist only of newlines. This is a fairly standard thing for SQL CLIs to do. It looks like: ``` sql> ; sql> | | ; sql> exit; Bye! ``` I think I *could* have implemented this with a `CliCommand` that throws out empty strings but it felt simpler to bake it into the `CliRepl`.
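Stripped of the surrounding read loop, the guard amounts to skipping blank input once any multi-line buffer has been flushed; a minimal sketch:

```java
// Simplified from the CliRepl loop: blank input is never dispatched.
if (line.isEmpty()) {
    continue; // nothing to execute, just prompt again
}
```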
Closes #30000 --- .../elasticsearch/xpack/sql/cli/CliRepl.java | 5 +++++ .../xpack/sql/cli/CliReplTests.java | 22 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java index f591ced77ec..956c62eaa1f 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java @@ -56,6 +56,11 @@ public class CliRepl { multiLine.setLength(0); } + // Skip empty commands + if (line.isEmpty()) { + continue; + } + // special case to handle exit if (isExit(line)) { cliTerminal.line().em("Bye!").ln(); diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java index 2397418256a..31aa4749221 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java @@ -38,6 +38,28 @@ public class CliReplTests extends ESTestCase { verifyNoMoreInteractions(mockCommand, mockSession); } + /** + * Test that empty commands are skipped. This includes commands that are + * just new lines. + */ + public void testEmptyNotSent() { + CliTerminal cliTerminal = new TestTerminal( + ";", + "", + "", + ";", + "exit;" + ); + + CliSession mockSession = mock(CliSession.class); + CliCommand mockCommand = mock(CliCommand.class); + + CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand); + cli.execute(); + + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo"); + verifyNoMoreInteractions(mockSession, mockCommand); + } public void testFatalCliExceptionHandling() throws Exception { CliTerminal cliTerminal = new TestTerminal( From 0f688e08678c668b6d5c1bfc2c7a8b86771fbc69 Mon Sep 17 00:00:00 2001 From: lcawley Date: Tue, 1 May 2018 10:21:39 -0700 Subject: [PATCH 55/68] [DOCS] Fixes section error --- .../security/authentication/active-directory-realm.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc index d76f6978688..f0f209d3fa3 100644 --- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -33,7 +33,7 @@ retrieves the user's group memberships from the `tokenGroups` attribute on the user's entry in Active Directory. [[ad-load-balancing]] -===== Load Balancing and Failover +==== Load Balancing and Failover The `load_balance.type` setting can be used at the realm level to configure how {security} should interact with multiple Active Directory servers. Two modes of operation are supported: failover and load balancing. @@ -41,7 +41,7 @@ operation are supported: failover and load balancing. See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings]. [[ad-settings]] -===== Active Directory Realm Settings +==== Active Directory Realm Settings See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings]. 
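To make the `load_balance.type` setting mentioned in that section concrete, a
realm configured for round-robin balancing could look roughly like this in
`elasticsearch.yml`. This is a sketch: the realm name and domain are
illustrative, and `round_robin` is one assumed mode alongside the
failover-style modes the section describes:

[source,yaml]
--------------------------------------------------
xpack:
  security:
    authc:
      realms:
        active_directory:
          type: active_directory
          order: 0
          domain_name: ad.example.com
          load_balance.type: round_robin <1>
--------------------------------------------------
<1> Assumed mode: rotate through the Active Directory servers behind
`ad.example.com`; a failover mode would instead always try servers in order
and only move on when one is unavailable.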
From 5b5c98c96b4926e25432a20ffc57a431678b5651 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 10:34:26 -0700 Subject: [PATCH 56/68] [DOCS] Adds changelog to Elasticsearch Reference (#30271) --- docs/CHANGELOG.asciidoc | 53 ++++++++++++++----- docs/reference/index-shared4.asciidoc | 2 +- .../en/release-notes/7.0.0-alpha1.asciidoc | 1 - 3 files changed, 40 insertions(+), 16 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 2bad49c4807..c774b023254 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -1,12 +1,26 @@ +[[es-release-notes]] += {es} Release Notes + +[partintro] +-- // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. :issue: https://github.com/elastic/elasticsearch/issues/ :pull: https://github.com/elastic/elasticsearch/pull/ -= Elasticsearch Release Notes +This section summarizes the changes in each release. -== Elasticsearch 7.0.0 +* <> +* <> + +-- + +[[release-notes-7.0.0]] +== {es} 7.0.0 + +[float] +[[breaking-7.0.0]] === Breaking Changes <> ({pull}30185[#30185]) -=== Breaking Java Changes +//[float] +//=== Breaking Java Changes -=== Deprecations +//[float] +//=== Deprecations -=== New Features +//[float] +//=== New Features -=== Enhancements +//[float] +//=== Enhancements +[float] === Bug Fixes Fail snapshot operations early when creating or deleting a snapshot on a repository that has been written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) -=== Regressions +//[float] +//=== Regressions -=== Known Issues +//[float] +//=== Known Issues -== Elasticsearch version 6.4.0 +[[release-notes-6.4.0]] +== {es} 6.4.0 -=== New Features +//[float] +//=== New Features +[float] === Enhancements <> ({pull}30255[#30255]) +[float] === Bug Fixes Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) -=== Regressions - -=== Known Issues - +//[float] +//=== Regressions +//[float] +//=== Known Issues diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc index 3d807dd98d3..5e6ebc8a5a2 100644 --- a/docs/reference/index-shared4.asciidoc +++ b/docs/reference/index-shared4.asciidoc @@ -5,4 +5,4 @@ include::testing.asciidoc[] include::glossary.asciidoc[] -include::release-notes.asciidoc[] +include::{docdir}/../CHANGELOG.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc b/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc index b68970c4c9d..2c264f48e33 100644 --- a/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc +++ b/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc @@ -21,6 +21,5 @@ ones that the user is authorized to access in case field level security is enabled See also: -* <> * {kibana-ref}/xkb-7.0.0-alpha1.html[{kib} {xpack} 7.0.0-alpha1 Release Notes] * {logstash-ref}/xls-7.0.0-alpha1.html[Logstash {xpack} 7.0.0-alpha1 Release Notes] From 0be443c5bbd4c7eb5776740d8fb7117224124cce Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 1 May 2018 14:31:23 -0400 Subject: [PATCH 57/68] REST Client: Add Request object flavored methods (#29623) Adds two new methods to `RestClient` that take a `Request` object. These methods will allow us to add more per-request customizable options without creating more and more and more overloads of the `performRequest` and `performRequestAsync` methods.
These new methods look like: ``` Response performRequest(Request request) ``` and ``` void performRequestAsync(Request request, ResponseListener responseListener) ``` This change doesn't add any actual features but enables adding things like per request timeouts and per request node selectors. This change *does* rework the `HighLevelRestClient` and its tests to use these new `Request` objects and it does update the docs. --- .../elasticsearch/client/ClusterClient.java | 4 +- .../elasticsearch/client/IndicesClient.java | 94 ++--- .../{Request.java => RequestConverters.java} | 363 +++++++++--------- .../client/RestHighLevelClient.java | 76 ++-- .../CustomRestHighLevelClientTests.java | 19 +- ...Tests.java => RequestConvertersTests.java} | 217 ++++------- .../client/RestHighLevelClientTests.java | 102 ++--- .../org/elasticsearch/client/Request.java | 202 ++++++++++ .../org/elasticsearch/client/RestClient.java | 159 ++++++-- .../elasticsearch/client/RequestTests.java | 137 +++++++ .../RestClientMultipleHostsIntegTests.java | 4 +- .../client/RestClientSingleHostTests.java | 101 +++-- .../elasticsearch/client/RestClientTests.java | 70 +++- .../RestClientDocumentation.java | 134 +++---- docs/CHANGELOG.asciidoc | 7 + docs/java-rest/low-level/usage.asciidoc | 123 +++--- .../reindex/remote/RemoteRequestBuilders.java | 6 +- 17 files changed, 1084 insertions(+), 734 deletions(-) rename client/rest-high-level/src/main/java/org/elasticsearch/client/{Request.java => RequestConverters.java} (78%) rename client/rest-high-level/src/test/java/org/elasticsearch/client/{RequestTests.java => RequestConvertersTests.java} (89%) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/Request.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/RequestTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index 177e33d7270..f3c84db79d6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -48,7 +48,7 @@ public final class ClusterClient { */ public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings, + return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, ClusterUpdateSettingsResponse::fromXContent, emptySet(), headers); } @@ -60,7 +60,7 @@ public final class ClusterClient { */ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, ActionListener listener, Header... 
headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings, + restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index ff9c612e1d4..445fd7c6a99 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -74,8 +74,8 @@ public final class IndicesClient { * Delete Index API on elastic.co */ public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, + DeleteIndexResponse::fromXContent, emptySet(), headers); } /** @@ -85,8 +85,8 @@ public final class IndicesClient { * Delete Index API on elastic.co */ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, + DeleteIndexResponse::fromXContent, listener, emptySet(), headers); } /** @@ -96,8 +96,8 @@ public final class IndicesClient { * Create Index API on elastic.co */ public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex, + CreateIndexResponse::fromXContent, emptySet(), headers); } /** @@ -107,8 +107,8 @@ public final class IndicesClient { * Create Index API on elastic.co */ public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex, + CreateIndexResponse::fromXContent, listener, emptySet(), headers); } /** @@ -118,8 +118,8 @@ public final class IndicesClient { * Put Mapping API on elastic.co */ public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... 
headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping, + PutMappingResponse::fromXContent, emptySet(), headers); } /** @@ -130,8 +130,8 @@ public final class IndicesClient { */ public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping, + PutMappingResponse::fromXContent, listener, emptySet(), headers); } /** @@ -142,7 +142,7 @@ public final class IndicesClient { * Index Aliases API on elastic.co */ public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, Request::updateAliases, + return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, IndicesAliasesResponse::fromXContent, emptySet(), headers); } @@ -155,7 +155,7 @@ public final class IndicesClient { */ public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, Request::updateAliases, + restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, IndicesAliasesResponse::fromXContent, listener, emptySet(), headers); } @@ -166,8 +166,8 @@ public final class IndicesClient { * Open Index API on elastic.co */ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex, + OpenIndexResponse::fromXContent, emptySet(), headers); } /** @@ -177,8 +177,8 @@ public final class IndicesClient { * Open Index API on elastic.co */ public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex, + OpenIndexResponse::fromXContent, listener, emptySet(), headers); } /** @@ -188,8 +188,8 @@ public final class IndicesClient { * Close Index API on elastic.co */ public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... 
headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, + CloseIndexResponse::fromXContent, emptySet(), headers); } /** @@ -199,8 +199,8 @@ public final class IndicesClient { * Close Index API on elastic.co */ public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, + CloseIndexResponse::fromXContent, listener, emptySet(), headers); } /** @@ -210,8 +210,8 @@ public final class IndicesClient { * Indices Aliases API on elastic.co */ public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequest(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse, - emptySet(), headers); + return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias, + RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** @@ -221,8 +221,8 @@ public final class IndicesClient { * Indices Aliases API on elastic.co */ public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsync(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias, + RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); } /** @@ -231,7 +231,7 @@ public final class IndicesClient { * See Refresh API on elastic.co */ public RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent, + return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent, emptySet(), headers); } @@ -241,7 +241,7 @@ public final class IndicesClient { * See Refresh API on elastic.co */ public void refreshAsync(RefreshRequest refreshRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent, + restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent, listener, emptySet(), headers); } @@ -251,7 +251,7 @@ public final class IndicesClient { * See Flush API on elastic.co */ public FlushResponse flush(FlushRequest flushRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(flushRequest, Request::flush, FlushResponse::fromXContent, + return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent, emptySet(), headers); } @@ -261,7 +261,7 @@ public final class IndicesClient { * See Flush API on elastic.co */ public void flushAsync(FlushRequest flushRequest, ActionListener listener, Header... 
headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, Request::flush, FlushResponse::fromXContent, + restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent, listener, emptySet(), headers); } @@ -272,8 +272,8 @@ public final class IndicesClient { * Force Merge API on elastic.co */ public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, + ForceMergeResponse::fromXContent, emptySet(), headers); } /** @@ -283,8 +283,8 @@ public final class IndicesClient { * Force Merge API on elastic.co */ public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, - listener, emptySet(), headers); + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, + ForceMergeResponse::fromXContent, listener, emptySet(), headers); } /** @@ -294,7 +294,7 @@ public final class IndicesClient { * Clear Cache API on elastic.co */ public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, Request::clearCache, + return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, ClearIndicesCacheResponse::fromXContent, emptySet(), headers); } @@ -306,7 +306,7 @@ public final class IndicesClient { */ public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, Request::clearCache, + restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, ClearIndicesCacheResponse::fromXContent, listener, emptySet(), headers); } @@ -319,7 +319,7 @@ public final class IndicesClient { public boolean exists(GetIndexRequest request, Header... headers) throws IOException { return restHighLevelClient.performRequest( request, - Request::indicesExist, + RequestConverters::indicesExist, RestHighLevelClient::convertExistsResponse, Collections.emptySet(), headers @@ -335,7 +335,7 @@ public final class IndicesClient { public void existsAsync(GetIndexRequest request, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsync( request, - Request::indicesExist, + RequestConverters::indicesExist, RestHighLevelClient::convertExistsResponse, listener, Collections.emptySet(), @@ -350,7 +350,7 @@ public final class IndicesClient { * Shrink Index API on elastic.co */ public ResizeResponse shrink(ResizeRequest resizeRequest, Header... 
headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent, + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent, emptySet(), headers); } @@ -361,7 +361,7 @@ public final class IndicesClient { * Shrink Index API on elastic.co */ public void shrinkAsync(ResizeRequest resizeRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent, + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent, listener, emptySet(), headers); } @@ -372,7 +372,7 @@ public final class IndicesClient { * Split Index API on elastic.co */ public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent, + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent, emptySet(), headers); } @@ -383,7 +383,7 @@ public final class IndicesClient { * Split Index API on elastic.co */ public void splitAsync(ResizeRequest resizeRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent, + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent, listener, emptySet(), headers); } @@ -394,8 +394,8 @@ public final class IndicesClient { * Rollover Index API on elastic.co */ public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent, - emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover, + RolloverResponse::fromXContent, emptySet(), headers); } /** @@ -405,7 +405,7 @@ public final class IndicesClient { * Rollover Index API on elastic.co */ public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent, + restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent, listener, emptySet(), headers); } @@ -416,7 +416,7 @@ public final class IndicesClient { * API on elastic.co */ public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, Request::indexPutSettings, + return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, UpdateSettingsResponse::fromXContent, emptySet(), headers); } @@ -428,7 +428,7 @@ public final class IndicesClient { */ public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener listener, Header... 
headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, Request::indexPutSettings, + restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, UpdateSettingsResponse::fromXContent, listener, emptySet(), headers); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java similarity index 78% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 192da83f8b4..d4cac4cc635 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -89,117 +89,85 @@ import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.StringJoiner; -public final class Request { - +final class RequestConverters { static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; - private final String method; - private final String endpoint; - private final Map parameters; - private final HttpEntity entity; - - public Request(String method, String endpoint, Map parameters, HttpEntity entity) { - this.method = Objects.requireNonNull(method, "method cannot be null"); - this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null"); - this.parameters = Objects.requireNonNull(parameters, "parameters cannot be null"); - this.entity = entity; - } - - public String getMethod() { - return method; - } - - public String getEndpoint() { - return endpoint; - } - - public Map getParameters() { - return parameters; - } - - public HttpEntity getEntity() { - return entity; - } - - @Override - public String toString() { - return "Request{" + - "method='" + method + '\'' + - ", endpoint='" + endpoint + '\'' + - ", params=" + parameters + - ", hasBody=" + (entity != null) + - '}'; + private RequestConverters() { + // Contains only status utility methods } static Request delete(DeleteRequest deleteRequest) { String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - Params parameters = Params.builder(); + Params parameters = new Params(request); parameters.withRouting(deleteRequest.routing()); parameters.withTimeout(deleteRequest.timeout()); parameters.withVersion(deleteRequest.version()); parameters.withVersionType(deleteRequest.versionType()); parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy()); parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards()); - - return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { String endpoint = endpoint(deleteIndexRequest.indices()); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - Params parameters = Params.builder(); + Params parameters = new Params(request); parameters.withTimeout(deleteIndexRequest.timeout()); parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(deleteIndexRequest.indicesOptions()); - - return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request 
openIndex(OpenIndexRequest openIndexRequest) { String endpoint = endpoint(openIndexRequest.indices(), "_open"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params parameters = Params.builder(); - + Params parameters = new Params(request); parameters.withTimeout(openIndexRequest.timeout()); parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards()); parameters.withIndicesOptions(openIndexRequest.indicesOptions()); - - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request closeIndex(CloseIndexRequest closeIndexRequest) { String endpoint = endpoint(closeIndexRequest.indices(), "_close"); - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + Params parameters = new Params(request); parameters.withTimeout(closeIndexRequest.timeout()); parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException { String endpoint = endpoint(createIndexRequest.indices()); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); - Params parameters = Params.builder(); + Params parameters = new Params(request); parameters.withTimeout(createIndexRequest.timeout()); parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); - HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + request.setEntity(createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException { - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, "/_aliases"); + + Params parameters = new Params(request); parameters.withTimeout(indicesAliasesRequest.timeout()); parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout()); - HttpEntity entity = createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPost.METHOD_NAME, "/_aliases", parameters.getParams(), entity); + request.setEntity(createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request putMapping(PutMappingRequest putMappingRequest) throws IOException { @@ -208,63 +176,69 @@ public final class Request { throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API"); } - String endpoint = endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()); + Request request = new Request(HttpPut.METHOD_NAME, endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type())); - Params parameters = Params.builder(); + Params parameters = new Params(request); parameters.withTimeout(putMappingRequest.timeout()); parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); - HttpEntity entity = createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + request.setEntity(createEntity(putMappingRequest, 
REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request refresh(RefreshRequest refreshRequest) { String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices(); - String endpoint = endpoint(indices, "_refresh"); - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_refresh")); + + Params parameters = new Params(request); parameters.withIndicesOptions(refreshRequest.indicesOptions()); - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request flush(FlushRequest flushRequest) { String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices(); - String endpoint = endpoint(indices, "_flush"); - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush")); + + Params parameters = new Params(request); parameters.withIndicesOptions(flushRequest.indicesOptions()); parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing())); parameters.putParam("force", Boolean.toString(flushRequest.force())); - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request forceMerge(ForceMergeRequest forceMergeRequest) { String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); - String endpoint = endpoint(indices, "_forcemerge"); - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge")); + + Params parameters = new Params(request); parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { String[] indices = clearIndicesCacheRequest.indices() == null ? 
Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices(); - String endpoint = endpoint(indices, "_cache/clear"); - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_cache/clear")); + + Params parameters = new Params(request); parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions()); parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache())); parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache())); parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache())); parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields())); - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request info() { - return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + return new Request(HttpGet.METHOD_NAME, "/"); } static Request bulk(BulkRequest bulkRequest) throws IOException { - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, "/_bulk"); + + Params parameters = new Params(request); parameters.withTimeout(bulkRequest.timeout()); parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); @@ -273,14 +247,14 @@ public final class Request { // and this content-type is supported by the Bulk API. XContentType bulkContentType = null; for (int i = 0; i < bulkRequest.numberOfActions(); i++) { - DocWriteRequest request = bulkRequest.requests().get(i); + DocWriteRequest action = bulkRequest.requests().get(i); - DocWriteRequest.OpType opType = request.opType(); + DocWriteRequest.OpType opType = action.opType(); if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType); + bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType); } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) request; + UpdateRequest updateRequest = (UpdateRequest) action; if (updateRequest.doc() != null) { bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); } @@ -298,30 +272,30 @@ public final class Request { final ContentType requestContentType = createContentType(bulkContentType); ByteArrayOutputStream content = new ByteArrayOutputStream(); - for (DocWriteRequest request : bulkRequest.requests()) { - DocWriteRequest.OpType opType = request.opType(); + for (DocWriteRequest action : bulkRequest.requests()) { + DocWriteRequest.OpType opType = action.opType(); try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { metadata.startObject(); { metadata.startObject(opType.getLowercase()); - if (Strings.hasLength(request.index())) { - metadata.field("_index", request.index()); + if (Strings.hasLength(action.index())) { + metadata.field("_index", action.index()); } - if (Strings.hasLength(request.type())) { - metadata.field("_type", request.type()); + if (Strings.hasLength(action.type())) { + metadata.field("_type", action.type()); } - if (Strings.hasLength(request.id())) { - metadata.field("_id", request.id()); + if (Strings.hasLength(action.id())) { + metadata.field("_id", action.id()); } - if (Strings.hasLength(request.routing())) { - metadata.field("routing", request.routing()); + if (Strings.hasLength(action.routing())) { + metadata.field("routing", action.routing()); } - if (request.version() != 
Versions.MATCH_ANY) { - metadata.field("version", request.version()); + if (action.version() != Versions.MATCH_ANY) { + metadata.field("version", action.version()); } - VersionType versionType = request.versionType(); + VersionType versionType = action.versionType(); if (versionType != VersionType.INTERNAL) { if (versionType == VersionType.EXTERNAL) { metadata.field("version_type", "external"); @@ -333,12 +307,12 @@ public final class Request { } if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) request; + IndexRequest indexRequest = (IndexRequest) action; if (Strings.hasLength(indexRequest.getPipeline())) { metadata.field("pipeline", indexRequest.getPipeline()); } } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) request; + UpdateRequest updateRequest = (UpdateRequest) action; if (updateRequest.retryOnConflict() > 0) { metadata.field("retry_on_conflict", updateRequest.retryOnConflict()); } @@ -357,7 +331,7 @@ public final class Request { BytesRef source = null; if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) request; + IndexRequest indexRequest = (IndexRequest) action; BytesReference indexSource = indexRequest.source(); XContentType indexXContentType = indexRequest.getContentType(); @@ -369,7 +343,7 @@ public final class Request { } } } else if (opType == DocWriteRequest.OpType.UPDATE) { - source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef(); + source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef(); } if (source != null) { @@ -377,20 +351,22 @@ public final class Request { content.write(separator); } } - - HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType); - return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity); + request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); + return request; } static Request exists(GetRequest getRequest) { - Request request = get(getRequest); - return new Request(HttpHead.METHOD_NAME, request.endpoint, request.parameters, null); + return getStyleRequest(HttpHead.METHOD_NAME, getRequest); } static Request get(GetRequest getRequest) { - String endpoint = endpoint(getRequest.index(), getRequest.type(), getRequest.id()); + return getStyleRequest(HttpGet.METHOD_NAME, getRequest); + } - Params parameters = Params.builder(); + private static Request getStyleRequest(String method, GetRequest getRequest) { + Request request = new Request(method, endpoint(getRequest.index(), getRequest.type(), getRequest.id())); + + Params parameters = new Params(request); parameters.withPreference(getRequest.preference()); parameters.withRouting(getRequest.routing()); parameters.withRefresh(getRequest.refresh()); @@ -400,25 +376,28 @@ public final class Request { parameters.withVersionType(getRequest.versionType()); parameters.withFetchSourceContext(getRequest.fetchSourceContext()); - return new Request(HttpGet.METHOD_NAME, endpoint, parameters.getParams(), null); + return request; } static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { - Params parameters = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, "/_mget"); + + Params parameters = new Params(request); 
parameters.withPreference(multiGetRequest.preference()); parameters.withRealtime(multiGetRequest.realtime()); parameters.withRefresh(multiGetRequest.refresh()); - HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPost.METHOD_NAME, "/_mget", parameters.getParams(), entity); + + request.setEntity(createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request index(IndexRequest indexRequest) { String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; - boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE); String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null); + Request request = new Request(method, endpoint); - Params parameters = Params.builder(); + Params parameters = new Params(request); parameters.withRouting(indexRequest.routing()); parameters.withTimeout(indexRequest.timeout()); parameters.withVersion(indexRequest.version()); @@ -429,19 +408,19 @@ public final class Request { BytesRef source = indexRequest.source().toBytesRef(); ContentType contentType = createContentType(indexRequest.getContentType()); - HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, contentType); - - return new Request(method, endpoint, parameters.getParams(), entity); + request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType)); + return request; } static Request ping() { - return new Request(HttpHead.METHOD_NAME, "/", Collections.emptyMap(), null); + return new Request(HttpHead.METHOD_NAME, "/"); } static Request update(UpdateRequest updateRequest) throws IOException { String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params parameters = Params.builder(); + Params parameters = new Params(request); parameters.withRouting(updateRequest.routing()); parameters.withTimeout(updateRequest.timeout()); parameters.withRefreshPolicy(updateRequest.getRefreshPolicy()); @@ -472,14 +451,14 @@ public final class Request { if (xContentType == null) { xContentType = Requests.INDEX_CONTENT_TYPE; } - - HttpEntity entity = createEntity(updateRequest, xContentType); - return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity); + request.setEntity(createEntity(updateRequest, xContentType)); + return request; } static Request search(SearchRequest searchRequest) throws IOException { - String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search"); - Params params = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), "_search")); + + Params params = new Params(request); params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); @@ -495,65 +474,73 @@ public final class Request { if (searchRequest.scroll() != null) { params.putParam("scroll", searchRequest.scroll().keepAlive()); } - HttpEntity entity = null; + if (searchRequest.source() != null) { - entity = createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE); + request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); } - return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity); + return request; } static Request 
searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { - HttpEntity entity = createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPost.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity); + Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll"); + request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException { - HttpEntity entity = createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpDelete.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity); + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/scroll"); + request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { - Params params = Params.builder(); + Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); + + Params params = new Params(request); params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests())); } + XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); - HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type())); - return new Request(HttpPost.METHOD_NAME, "/_msearch", params.getParams(), entity); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); + return request; } static Request existsAlias(GetAliasesRequest getAliasesRequest) { - Params params = Params.builder(); - params.withIndicesOptions(getAliasesRequest.indicesOptions()); - params.withLocal(getAliasesRequest.local()); if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) && (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) { throw new IllegalArgumentException("existsAlias requires at least an alias or an index"); } String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices(); String[] aliases = getAliasesRequest.aliases() == null ? 
Strings.EMPTY_ARRAY : getAliasesRequest.aliases(); - String endpoint = endpoint(indices, "_alias", aliases); - return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); + + Request request = new Request(HttpHead.METHOD_NAME, endpoint(indices, "_alias", aliases)); + + Params params = new Params(request); + params.withIndicesOptions(getAliasesRequest.indicesOptions()); + params.withLocal(getAliasesRequest.local()); + return request; } static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) { - Params params = Params.builder(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint(fieldCapabilitiesRequest.indices(), "_field_caps")); + + Params params = new Params(request); params.withFields(fieldCapabilitiesRequest.fields()); params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions()); - - String[] indices = fieldCapabilitiesRequest.indices(); - String endpoint = endpoint(indices, "_field_caps"); - return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null); + return request; } static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { - String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"); - Params params = Params.builder(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval")); + + Params params = new Params(request); params.withIndicesOptions(rankEvalRequest.indicesOptions()); - HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity); + + request.setEntity(createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request split(ResizeRequest resizeRequest) throws IOException { @@ -571,64 +558,76 @@ public final class Request { } private static Request resize(ResizeRequest resizeRequest) throws IOException { - Params params = Params.builder(); - params.withTimeout(resizeRequest.timeout()); - params.withMasterTimeout(resizeRequest.masterNodeTimeout()); - params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards()); String endpoint = new EndpointBuilder().addPathPart(resizeRequest.getSourceIndex()) .addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT)) .addPathPart(resizeRequest.getTargetIndexRequest().index()).build(); - HttpEntity entity = createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPut.METHOD_NAME, endpoint, params.getParams(), entity); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + Params params = new Params(request); + params.withTimeout(resizeRequest.timeout()); + params.withMasterTimeout(resizeRequest.masterNodeTimeout()); + params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards()); + + request.setEntity(createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { - Params parameters = Params.builder(); + Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings"); + + Params parameters = new Params(request); parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); - HttpEntity entity = createEntity(clusterUpdateSettingsRequest, 
REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPut.METHOD_NAME, "/_cluster/settings", parameters.getParams(), entity); + + request.setEntity(createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } static Request rollover(RolloverRequest rolloverRequest) throws IOException { - Params params = Params.builder(); + String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover") + .addPathPart(rolloverRequest.getNewIndexName()).build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + Params params = new Params(request); params.withTimeout(rolloverRequest.timeout()); params.withMasterTimeout(rolloverRequest.masterNodeTimeout()); params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards()); if (rolloverRequest.isDryRun()) { params.putParam("dry_run", Boolean.TRUE.toString()); } - String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover") - .addPathPart(rolloverRequest.getNewIndexName()).build(); - HttpEntity entity = createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity); + + request.setEntity(createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } - static Request indicesExist(GetIndexRequest request) { + static Request indicesExist(GetIndexRequest getIndexRequest) { // this can be called with no indices as argument by transport client, not via REST though - if (request.indices() == null || request.indices().length == 0) { + if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) { throw new IllegalArgumentException("indices are mandatory"); } - String endpoint = endpoint(request.indices(), ""); - Params params = Params.builder(); - params.withLocal(request.local()); - params.withHuman(request.humanReadable()); - params.withIndicesOptions(request.indicesOptions()); - params.withIncludeDefaults(request.includeDefaults()); - return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); + String endpoint = endpoint(getIndexRequest.indices(), ""); + Request request = new Request(HttpHead.METHOD_NAME, endpoint); + + Params params = new Params(request); + params.withLocal(getIndexRequest.local()); + params.withHuman(getIndexRequest.humanReadable()); + params.withIndicesOptions(getIndexRequest.indicesOptions()); + params.withIncludeDefaults(getIndexRequest.includeDefaults()); + return request; } static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException { - Params parameters = Params.builder(); + String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint(indices, "_settings")); + + Params parameters = new Params(request); parameters.withTimeout(updateSettingsRequest.timeout()); parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(updateSettingsRequest.indicesOptions()); parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting()); - String[] indices = updateSettingsRequest.indices() == null ? 
Strings.EMPTY_ARRAY : updateSettingsRequest.indices(); - String endpoint = endpoint(indices, "_settings"); - HttpEntity entity = createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + request.setEntity(createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; } private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { @@ -678,19 +677,19 @@ public final class Request { } /** - * Utility class to build request's parameters map and centralize all parameter names. + * Utility class to help with common parameter names and patterns. Wraps + * a {@link Request} and adds the parameters to it directly. */ static class Params { - private final Map params = new HashMap<>(); + private final Request request; - private Params() { + Params(Request request) { + this.request = request; } - Params putParam(String key, String value) { + Params putParam(String name, String value) { if (Strings.hasLength(value)) { - if (params.putIfAbsent(key, value) != null) { - throw new IllegalArgumentException("Request parameter [" + key + "] is already registered"); - } + request.addParameter(name, value); } return this; } @@ -854,14 +853,6 @@ public final class Request { } return this; } - - Map getParams() { - return Collections.unmodifiableMap(params); - } - - static Params builder() { - return new Params(); - } } /** diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index c6d5e947f2c..9de8a748dad 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -258,7 +258,7 @@ public class RestHighLevelClient implements Closeable { * See Bulk API on elastic.co */ public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers); + return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, emptySet(), headers); } /** @@ -267,14 +267,14 @@ public class RestHighLevelClient implements Closeable { * See Bulk API on elastic.co */ public final void bulkAsync(BulkRequest bulkRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers); + performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, listener, emptySet(), headers); } /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise */ public final boolean ping(Header... headers) throws IOException { - return performRequest(new MainRequest(), (request) -> Request.ping(), RestHighLevelClient::convertExistsResponse, + return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), RestHighLevelClient::convertExistsResponse, emptySet(), headers); } @@ -282,8 +282,8 @@ public class RestHighLevelClient implements Closeable { * Get the cluster info otherwise provided when sending an HTTP request to port 9200 */ public final MainResponse info(Header... 
headers) throws IOException { - return performRequestAndParseEntity(new MainRequest(), (request) -> Request.info(), MainResponse::fromXContent, emptySet(), - headers); + return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), + MainResponse::fromXContent, emptySet(), headers); } /** @@ -292,7 +292,7 @@ public class RestHighLevelClient implements Closeable { * See Get API on elastic.co */ public final GetResponse get(GetRequest getRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, singleton(404), headers); + return performRequestAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, singleton(404), headers); } /** @@ -301,7 +301,8 @@ public class RestHighLevelClient implements Closeable { * See Get API on elastic.co */ public final void getAsync(GetRequest getRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers); + performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, listener, + singleton(404), headers); } /** @@ -310,7 +311,8 @@ public class RestHighLevelClient implements Closeable { * See Multi Get API on elastic.co */ public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, singleton(404), headers); + return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, + singleton(404), headers); } /** @@ -319,7 +321,7 @@ public class RestHighLevelClient implements Closeable { * See Multi Get API on elastic.co */ public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, listener, + performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, listener, singleton(404), headers); } @@ -329,7 +331,7 @@ public class RestHighLevelClient implements Closeable { * See Get API on elastic.co */ public final boolean exists(GetRequest getRequest, Header... headers) throws IOException { - return performRequest(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers); + return performRequest(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** @@ -338,7 +340,8 @@ public class RestHighLevelClient implements Closeable { * See Get API on elastic.co */ public final void existsAsync(GetRequest getRequest, ActionListener listener, Header... headers) { - performRequestAsync(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); + performRequestAsync(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, listener, + emptySet(), headers); } /** @@ -347,7 +350,7 @@ public class RestHighLevelClient implements Closeable { * See Index API on elastic.co */ public final IndexResponse index(IndexRequest indexRequest, Header... 
headers) throws IOException { - return performRequestAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, emptySet(), headers); + return performRequestAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, emptySet(), headers); } /** @@ -356,7 +359,8 @@ public class RestHighLevelClient implements Closeable { * See Index API on elastic.co */ public final void indexAsync(IndexRequest indexRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers); + performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, listener, + emptySet(), headers); } /** @@ -365,7 +369,7 @@ public class RestHighLevelClient implements Closeable { * See Update API on elastic.co */ public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers); + return performRequestAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, emptySet(), headers); } /** @@ -374,7 +378,8 @@ public class RestHighLevelClient implements Closeable { * See Update API on elastic.co */ public final void updateAsync(UpdateRequest updateRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers); + performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, listener, + emptySet(), headers); } /** @@ -383,8 +388,8 @@ public class RestHighLevelClient implements Closeable { * See Delete API on elastic.co */ public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, Collections.singleton(404), - headers); + return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, + singleton(404), headers); } /** @@ -393,7 +398,7 @@ public class RestHighLevelClient implements Closeable { * See Delete API on elastic.co */ public final void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, listener, + performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener, Collections.singleton(404), headers); } @@ -403,7 +408,7 @@ public class RestHighLevelClient implements Closeable { * See Search API on elastic.co */ public final SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, emptySet(), headers); + return performRequestAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, emptySet(), headers); } /** @@ -412,7 +417,8 @@ public class RestHighLevelClient implements Closeable { * See Search API on elastic.co */ public final void searchAsync(SearchRequest searchRequest, ActionListener listener, Header... 
headers) { - performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers); + performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, listener, + emptySet(), headers); } /** @@ -422,7 +428,7 @@ public class RestHighLevelClient implements Closeable { * elastic.co */ public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(multiSearchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, + return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, emptySet(), headers); } @@ -433,7 +439,7 @@ public class RestHighLevelClient implements Closeable { * elastic.co */ public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(searchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, listener, + performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, listener, emptySet(), headers); } @@ -444,7 +450,8 @@ public class RestHighLevelClient implements Closeable { * API on elastic.co */ public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, emptySet(), headers); + return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent, + emptySet(), headers); } /** @@ -455,7 +462,7 @@ public class RestHighLevelClient implements Closeable { */ public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, + performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent, listener, emptySet(), headers); } @@ -466,7 +473,7 @@ public class RestHighLevelClient implements Closeable { * Clear Scroll API on elastic.co */ public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent, + return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent, emptySet(), headers); } @@ -478,7 +485,7 @@ public class RestHighLevelClient implements Closeable { */ public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent, + performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent, listener, emptySet(), headers); } @@ -489,7 +496,8 @@ public class RestHighLevelClient implements Closeable { * on elastic.co */ public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... 
headers) throws IOException { - return performRequestAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, emptySet(), headers); + return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, + emptySet(), headers); } /** @@ -499,8 +507,8 @@ public class RestHighLevelClient implements Closeable { * on elastic.co */ public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, listener, emptySet(), - headers); + performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, listener, + emptySet(), headers); } /** @@ -511,7 +519,7 @@ public class RestHighLevelClient implements Closeable { */ public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, Header... headers) throws IOException { - return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps, + return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, FieldCapabilitiesResponse::fromXContent, emptySet(), headers); } @@ -524,7 +532,7 @@ public class RestHighLevelClient implements Closeable { public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps, + performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers); } @@ -544,9 +552,10 @@ public class RestHighLevelClient implements Closeable { throw validationException; } Request req = requestConverter.apply(request); + req.setHeaders(headers); Response response; try { - response = client.performRequest(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), headers); + response = client.performRequest(req); } catch (ResponseException e) { if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) { try { @@ -593,9 +602,10 @@ public class RestHighLevelClient implements Closeable { listener.onFailure(e); return; } + req.setHeaders(headers); ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); - client.performRequestAsync(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), responseListener, headers); + client.performRequestAsync(req, responseListener); } final ResponseListener wrapResponseListener(CheckedFunction responseConverter, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index 42496822090..617b35c4d40 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import org.apache.http.Header; -import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.ProtocolVersion; import org.apache.http.RequestLine; @@ -52,14 +51,9 @@ import java.util.Collections; import java.util.List; import java.util.stream.Collectors; -import static java.util.Collections.emptyMap; import static 
java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyMapOf; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Matchers.anyVararg; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -79,14 +73,15 @@ public class CustomRestHighLevelClientTests extends ESTestCase { final RestClient restClient = mock(RestClient.class); restHighLevelClient = new CustomRestClient(restClient); - doAnswer(mock -> mockPerformRequest((Header) mock.getArguments()[4])) + doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0])) .when(restClient) - .performRequest(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), anyObject(), anyVararg()); + .performRequest(any(Request.class)); - doAnswer(mock -> mockPerformRequestAsync((Header) mock.getArguments()[5], (ResponseListener) mock.getArguments()[4])) + doAnswer(inv -> mockPerformRequestAsync( + ((Request) inv.getArguments()[0]).getHeaders()[0], + (ResponseListener) inv.getArguments()[1])) .when(restClient) - .performRequestAsync(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), - any(HttpEntity.class), any(ResponseListener.class), anyVararg()); + .performRequestAsync(any(Request.class), any(ResponseListener.class)); } } @@ -193,7 +188,7 @@ public class CustomRestHighLevelClientTests extends ESTestCase { } Request toRequest(MainRequest mainRequest) throws IOException { - return new Request(HttpGet.METHOD_NAME, ENDPOINT, emptyMap(), null); + return new Request(HttpGet.METHOD_NAME, ENDPOINT); } MainResponse toResponse(Response response) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java similarity index 89% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 0fdeb7555a0..3f9428a3aea 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -82,6 +82,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.client.RequestConverters.Params; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.TermQueryBuilder; @@ -124,8 +126,8 @@ import java.util.function.Function; import java.util.function.Supplier; import static java.util.Collections.singletonMap; -import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE; -import static org.elasticsearch.client.Request.enforceSameContentType; +import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; +import static org.elasticsearch.client.RequestConverters.enforceSameContentType; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases; import static 
org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings; @@ -137,40 +139,9 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.nullValue; -public class RequestTests extends ESTestCase { - - public void testConstructor() { - final String method = randomFrom("GET", "PUT", "POST", "HEAD", "DELETE"); - final String endpoint = randomAlphaOfLengthBetween(1, 10); - final Map parameters = singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); - final HttpEntity entity = randomBoolean() ? new StringEntity(randomAlphaOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null; - - NullPointerException e = expectThrows(NullPointerException.class, () -> new Request(null, endpoint, parameters, entity)); - assertEquals("method cannot be null", e.getMessage()); - - e = expectThrows(NullPointerException.class, () -> new Request(method, null, parameters, entity)); - assertEquals("endpoint cannot be null", e.getMessage()); - - e = expectThrows(NullPointerException.class, () -> new Request(method, endpoint, null, entity)); - assertEquals("parameters cannot be null", e.getMessage()); - - final Request request = new Request(method, endpoint, parameters, entity); - assertEquals(method, request.getMethod()); - assertEquals(endpoint, request.getEndpoint()); - assertEquals(parameters, request.getParameters()); - assertEquals(entity, request.getEntity()); - - final Constructor[] constructors = Request.class.getConstructors(); - assertEquals("Expected only 1 constructor", 1, constructors.length); - assertTrue("Request constructor is not public", Modifier.isPublic(constructors[0].getModifiers())); - } - - public void testClassVisibility() { - assertTrue("Request class is not public", Modifier.isPublic(Request.class.getModifiers())); - } - +public class RequestConvertersTests extends ESTestCase { public void testPing() { - Request request = Request.ping(); + Request request = RequestConverters.ping(); assertEquals("/", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertNull(request.getEntity()); @@ -178,7 +149,7 @@ public class RequestTests extends ESTestCase { } public void testInfo() { - Request request = Request.info(); + Request request = RequestConverters.info(); assertEquals("/", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertNull(request.getEntity()); @@ -186,7 +157,7 @@ public class RequestTests extends ESTestCase { } public void testGet() { - getAndExistsTest(Request::get, HttpGet.METHOD_NAME); + getAndExistsTest(RequestConverters::get, HttpGet.METHOD_NAME); } public void testMultiGet() throws IOException { @@ -232,7 +203,7 @@ public class RequestTests extends ESTestCase { multiGetRequest.add(item); } - Request request = Request.multiGet(multiGetRequest); + Request request = RequestConverters.multiGet(multiGetRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_mget", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); @@ -260,7 +231,7 @@ public class RequestTests extends ESTestCase { } } - Request request = Request.delete(deleteRequest); + Request request = RequestConverters.delete(deleteRequest); assertEquals("/" + index + "/" + type + "/" + id, request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); @@ -268,7 +239,7 @@ public 
class RequestTests extends ESTestCase { } public void testExists() { - getAndExistsTest(Request::exists, HttpHead.METHOD_NAME); + getAndExistsTest(RequestConverters::exists, HttpHead.METHOD_NAME); } public void testIndicesExist() { @@ -282,7 +253,7 @@ public class RequestTests extends ESTestCase { setRandomHumanReadable(getIndexRequest, expectedParams); setRandomIncludeDefaults(getIndexRequest, expectedParams); - final Request request = Request.indicesExist(getIndexRequest); + final Request request = RequestConverters.indicesExist(getIndexRequest); assertEquals(HttpHead.METHOD_NAME, request.getMethod()); assertEquals("/" + String.join(",", indices), request.getEndpoint()); @@ -291,8 +262,8 @@ public class RequestTests extends ESTestCase { } public void testIndicesExistEmptyIndices() { - expectThrows(IllegalArgumentException.class, () -> Request.indicesExist(new GetIndexRequest())); - expectThrows(IllegalArgumentException.class, () -> Request.indicesExist(new GetIndexRequest().indices((String[])null))); + expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest())); + expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[])null))); } private static void getAndExistsTest(Function requestConverter, String method) { @@ -361,7 +332,7 @@ public class RequestTests extends ESTestCase { setRandomMasterTimeout(createIndexRequest, expectedParams); setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams); - Request request = Request.createIndex(createIndexRequest); + Request request = RequestConverters.createIndex(createIndexRequest); assertEquals("/" + createIndexRequest.index(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPut.METHOD_NAME, request.getMethod()); @@ -382,7 +353,7 @@ public class RequestTests extends ESTestCase { setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); setRandomMasterTimeout(indicesAliasesRequest, expectedParams); - Request request = Request.updateAliases(indicesAliasesRequest); + Request request = RequestConverters.updateAliases(indicesAliasesRequest); assertEquals("/_aliases", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(indicesAliasesRequest, request.getEntity()); @@ -402,7 +373,7 @@ public class RequestTests extends ESTestCase { setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); setRandomMasterTimeout(putMappingRequest, expectedParams); - Request request = Request.putMapping(putMappingRequest); + Request request = RequestConverters.putMapping(putMappingRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); String index = String.join(",", indices); if (Strings.hasLength(index)) { @@ -427,7 +398,7 @@ public class RequestTests extends ESTestCase { setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions, expectedParams); - Request request = Request.deleteIndex(deleteIndexRequest); + Request request = RequestConverters.deleteIndex(deleteIndexRequest); assertEquals("/" + String.join(",", indices), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); @@ -451,7 +422,7 @@ public class RequestTests extends ESTestCase { setRandomIndicesOptions(openIndexRequest::indicesOptions, 
openIndexRequest::indicesOptions, expectedParams); setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams); - Request request = Request.openIndex(openIndexRequest); + Request request = RequestConverters.openIndex(openIndexRequest); StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open"); assertThat(endpoint.toString(), equalTo(request.getEndpoint())); assertThat(expectedParams, equalTo(request.getParameters())); @@ -474,7 +445,7 @@ public class RequestTests extends ESTestCase { setRandomMasterTimeout(closeIndexRequest, expectedParams); setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams); - Request request = Request.closeIndex(closeIndexRequest); + Request request = RequestConverters.closeIndex(closeIndexRequest); StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close"); assertThat(endpoint.toString(), equalTo(request.getEndpoint())); assertThat(expectedParams, equalTo(request.getParameters())); @@ -542,7 +513,7 @@ public class RequestTests extends ESTestCase { indexRequest.source(builder); } - Request request = Request.index(indexRequest); + Request request = RequestConverters.index(indexRequest); if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { assertEquals("/" + index + "/" + type + "/" + id + "/_create", request.getEndpoint()); } else if (id != null) { @@ -572,7 +543,7 @@ public class RequestTests extends ESTestCase { } Map expectedParams = new HashMap<>(); setRandomIndicesOptions(refreshRequest::indicesOptions, refreshRequest::indicesOptions, expectedParams); - Request request = Request.refresh(refreshRequest); + Request request = RequestConverters.refresh(refreshRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); if (indices != null && indices.length > 0) { endpoint.add(String.join(",", indices)); @@ -604,7 +575,7 @@ public class RequestTests extends ESTestCase { } expectedParams.put("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing())); - Request request = Request.flush(flushRequest); + Request request = RequestConverters.flush(flushRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); if (indices != null && indices.length > 0) { endpoint.add(String.join(",", indices)); @@ -641,7 +612,7 @@ public class RequestTests extends ESTestCase { } expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush())); - Request request = Request.forceMerge(forceMergeRequest); + Request request = RequestConverters.forceMerge(forceMergeRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); if (indices != null && indices.length > 0) { endpoint.add(String.join(",", indices)); @@ -681,7 +652,7 @@ public class RequestTests extends ESTestCase { expectedParams.put("fields", String.join(",", clearIndicesCacheRequest.fields())); } - Request request = Request.clearCache(clearIndicesCacheRequest); + Request request = RequestConverters.clearCache(clearIndicesCacheRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); if (indices != null && indices.length > 0) { endpoint.add(String.join(",", indices)); @@ -754,7 +725,7 @@ public class RequestTests extends ESTestCase { randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams); } - Request request = Request.update(updateRequest); + Request request = RequestConverters.update(updateRequest); assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint()); 
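These tests all assert the same two things about a converted request: the endpoint and the parameter map. Every converter in the new style follows the same shape, so a single sketch covers them all. This is a minimal illustration, assuming a hypothetical `ExampleRequest` with timeout and master-timeout getters; the helper names `EndpointBuilder`, `Params`, `createEntity`, and `REQUEST_BODY_CONTENT_TYPE` are the real ones from `RequestConverters` shown in this patch:

```
static Request example(ExampleRequest exampleRequest) throws IOException {
    // Build the endpoint first; EndpointBuilder URL-encodes each path part.
    String endpoint = new EndpointBuilder()
            .addPathPart(exampleRequest.index())
            .addPathPartAsIs("_example")
            .build();
    // The Request is created up front and owns its parameter map...
    Request request = new Request(HttpPost.METHOD_NAME, endpoint);
    // ...so Params no longer accumulates a map of its own: each withXyz()
    // call writes straight through to request.addParameter(name, value).
    Params params = new Params(request);
    params.withTimeout(exampleRequest.timeout());
    params.withMasterTimeout(exampleRequest.masterNodeTimeout());
    // The body, if any, is attached last.
    request.setEntity(createEntity(exampleRequest, REQUEST_BODY_CONTENT_TYPE));
    return request;
}
```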
assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); @@ -791,7 +762,7 @@ public class RequestTests extends ESTestCase { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON)); updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML)); - Request.update(updateRequest); + RequestConverters.update(updateRequest); }); assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", exception.getMessage()); @@ -859,7 +830,7 @@ public class RequestTests extends ESTestCase { bulkRequest.add(docWriteRequest); } - Request request = Request.bulk(bulkRequest); + Request request = RequestConverters.bulk(bulkRequest); assertEquals("/_bulk", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); @@ -914,7 +885,7 @@ public class RequestTests extends ESTestCase { bulkRequest.add(new UpdateRequest("index", "type", "1").script(mockScript("test"))); bulkRequest.add(new DeleteRequest("index", "type", "2")); - Request request = Request.bulk(bulkRequest); + Request request = RequestConverters.bulk(bulkRequest); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } { @@ -924,7 +895,7 @@ public class RequestTests extends ESTestCase { bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType)); bulkRequest.add(new DeleteRequest("index", "type", "2")); - Request request = Request.bulk(bulkRequest); + Request request = RequestConverters.bulk(bulkRequest); assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } { @@ -936,14 +907,14 @@ public class RequestTests extends ESTestCase { updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType)); } - Request request = Request.bulk(new BulkRequest().add(updateRequest)); + Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest)); assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } { BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE)); bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); assertEquals("Mismatching content-type found for request with content-type [JSON], " + "previous requests have content-type [SMILE]", exception.getMessage()); } @@ -957,7 +928,7 @@ public class RequestTests extends ESTestCase { .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON)) .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)) ); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); assertEquals("Mismatching content-type found for request with 
content-type [SMILE], " + "previous requests have content-type [JSON]", exception.getMessage()); } @@ -970,7 +941,7 @@ public class RequestTests extends ESTestCase { bulkRequest.add(new DeleteRequest("index", "type", "3")); bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON)); bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType)); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); assertEquals("Unsupported content-type found for request with content-type [" + xContentType + "], only JSON and SMILE are supported", exception.getMessage()); } @@ -978,7 +949,7 @@ public class RequestTests extends ESTestCase { public void testSearchNullSource() throws IOException { SearchRequest searchRequest = new SearchRequest(); - Request request = Request.search(searchRequest); + Request request = RequestConverters.search(searchRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_search", request.getEndpoint()); assertNull(request.getEntity()); @@ -1073,7 +1044,7 @@ public class RequestTests extends ESTestCase { searchRequest.source(searchSourceBuilder); } - Request request = Request.search(searchRequest); + Request request = RequestConverters.search(searchRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); String index = String.join(",", indices); if (Strings.hasLength(index)) { @@ -1127,7 +1098,7 @@ public class RequestTests extends ESTestCase { expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests())); } - Request request = Request.multiSearch(multiSearchRequest); + Request request = RequestConverters.multiSearch(multiSearchRequest); assertEquals("/_msearch", request.getEndpoint()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(expectedParams, request.getParameters()); @@ -1152,7 +1123,7 @@ public class RequestTests extends ESTestCase { if (randomBoolean()) { searchScrollRequest.scroll(randomPositiveTimeValue()); } - Request request = Request.searchScroll(searchScrollRequest); + Request request = RequestConverters.searchScroll(searchScrollRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); @@ -1166,7 +1137,7 @@ public class RequestTests extends ESTestCase { for (int i = 0; i < numScrolls; i++) { clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10)); } - Request request = Request.clearScroll(clearScrollRequest); + Request request = RequestConverters.clearScroll(clearScrollRequest); assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); @@ -1191,7 +1162,7 @@ public class RequestTests extends ESTestCase { setRandomLocal(getAliasesRequest, expectedParams); setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams); - Request request = Request.existsAlias(getAliasesRequest); + Request request = RequestConverters.existsAlias(getAliasesRequest); StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); if (indices != null && indices.length > 0) { expectedEndpoint.add(String.join(",", 
indices)); @@ -1209,13 +1180,15 @@ public class RequestTests extends ESTestCase { public void testExistsAliasNoAliasNoIndex() { { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } { GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null); getAliasesRequest.indices((String[])null); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> + RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } } @@ -1234,7 +1207,7 @@ public class RequestTests extends ESTestCase { fieldCapabilitiesRequest::indicesOptions, indicesOptionsParams); - Request request = Request.fieldCaps(fieldCapabilitiesRequest); + Request request = RequestConverters.fieldCaps(fieldCapabilitiesRequest); // Verify that the resulting REST request looks as expected. StringJoiner endpoint = new StringJoiner("/", "/", ""); @@ -1270,7 +1243,7 @@ public class RequestTests extends ESTestCase { Map expectedParams = new HashMap<>(); setRandomIndicesOptions(rankEvalRequest::indicesOptions, rankEvalRequest::indicesOptions, expectedParams); - Request request = Request.rankEval(rankEvalRequest); + Request request = RequestConverters.rankEval(rankEvalRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); String index = String.join(",", indices); if (Strings.hasLength(index)) { @@ -1284,25 +1257,25 @@ public class RequestTests extends ESTestCase { } public void testSplit() throws IOException { - resizeTest(ResizeType.SPLIT, Request::split); + resizeTest(ResizeType.SPLIT, RequestConverters::split); } public void testSplitWrongResizeType() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SHRINK); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.split(resizeRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.split(resizeRequest)); assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage()); } public void testShrinkWrongResizeType() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.shrink(resizeRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.shrink(resizeRequest)); assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage()); } public void testShrink() throws IOException { - resizeTest(ResizeType.SHRINK, Request::shrink); + resizeTest(ResizeType.SHRINK, RequestConverters::shrink); } private static void resizeTest(ResizeType resizeType, CheckedFunction function) @@ -1341,7 +1314,7 @@ public class RequestTests extends ESTestCase { setRandomMasterTimeout(request, expectedParams); setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - Request expectedRequest = 
Request.clusterPutSettings(request); + Request expectedRequest = RequestConverters.clusterPutSettings(request); assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod()); assertEquals(expectedParams, expectedRequest.getParameters()); @@ -1374,7 +1347,7 @@ public class RequestTests extends ESTestCase { } setRandomWaitForActiveShards(rolloverRequest.getCreateIndexRequest()::waitForActiveShards, expectedParams); - Request request = Request.rollover(rolloverRequest); + Request request = RequestConverters.rollover(rolloverRequest); if (rolloverRequest.getNewIndexName() == null) { assertEquals("/" + rolloverRequest.getAlias() + "/_rollover", request.getEndpoint()); } else { @@ -1399,7 +1372,7 @@ public class RequestTests extends ESTestCase { } } - Request request = Request.indexPutSettings(updateSettingsRequest); + Request request = RequestConverters.indexPutSettings(updateSettingsRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); if (indices != null && indices.length > 0) { endpoint.add(String.join(",", indices)); @@ -1417,143 +1390,115 @@ public class RequestTests extends ESTestCase { assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); } - public void testParams() { - final int nbParams = randomIntBetween(0, 10); - Request.Params params = Request.Params.builder(); - Map expectedParams = new HashMap<>(); - for (int i = 0; i < nbParams; i++) { - String paramName = "p_" + i; - String paramValue = randomAlphaOfLength(5); - params.putParam(paramName, paramValue); - expectedParams.put(paramName, paramValue); - } - - Map requestParams = params.getParams(); - assertEquals(nbParams, requestParams.size()); - assertEquals(expectedParams, requestParams); - } - - public void testParamsNoDuplicates() { - Request.Params params = Request.Params.builder(); - params.putParam("test", "1"); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> params.putParam("test", "2")); - assertEquals("Request parameter [test] is already registered", e.getMessage()); - - Map requestParams = params.getParams(); - assertEquals(1L, requestParams.size()); - assertEquals("1", requestParams.values().iterator().next()); - } - public void testEndpointBuilder() { { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder(); + EndpointBuilder endpointBuilder = new EndpointBuilder(); assertEquals("/", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart(Strings.EMPTY_ARRAY); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart(Strings.EMPTY_ARRAY); assertEquals("/", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart(""); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart(""); assertEquals("/", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a", "b"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b"); assertEquals("/a/b", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a").addPathPart("b") + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b") .addPathPartAsIs("_create"); assertEquals("/a/b/_create", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new 
Request.EndpointBuilder().addPathPart("a", "b", "c") + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c") .addPathPartAsIs("_create"); assertEquals("/a/b/c/_create", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a").addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_create"); assertEquals("/a/_create", endpointBuilder.build()); } } public void testEndpointBuilderEncodeParts() { { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("-#index1,index#2", "type", "id"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("-#index1,index#2", "type", "id"); assertEquals("/-%23index1,index%232/type/id", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type#2", "id"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type#2", "id"); assertEquals("/index/type%232/id", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "this/is/the/id"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "this/is/the/id"); assertEquals("/index/type/this%2Fis%2Fthe%2Fid", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "this|is|the|id"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "this|is|the|id"); assertEquals("/index/type/this%7Cis%7Cthe%7Cid", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "id#1"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "id#1"); assertEquals("/index/type/id%231", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("", "_search"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("", "_search"); assertEquals("/%3Clogstash-%7Bnow%2FM%7D%3E/_search", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("中文"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("中文"); assertEquals("/中文", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo bar"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo bar"); assertEquals("/foo%20bar", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo+bar"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo+bar"); assertEquals("/foo+bar", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo+bar"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo+bar"); assertEquals("/foo+bar", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo/bar"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo/bar"); assertEquals("/foo%2Fbar", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo^bar"); + 
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo^bar"); assertEquals("/foo%5Ebar", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("cluster1:index1,index2") + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2") .addPathPartAsIs("_search"); assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build()); } { - Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder() + EndpointBuilder endpointBuilder = new EndpointBuilder() .addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear"); assertEquals("/index1,index2/cache/clear", endpointBuilder.build()); } } public void testEndpoint() { - assertEquals("/index/type/id", Request.endpoint("index", "type", "id")); - assertEquals("/index/type/id/_endpoint", Request.endpoint("index", "type", "id", "_endpoint")); - assertEquals("/index1,index2", Request.endpoint(new String[]{"index1", "index2"})); - assertEquals("/index1,index2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, "_endpoint")); - assertEquals("/index1,index2/type1,type2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, + assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id")); + assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint")); + assertEquals("/index1,index2", RequestConverters.endpoint(new String[]{"index1", "index2"})); + assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint")); + assertEquals("/index1,index2/type1,type2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, new String[]{"type1", "type2"}, "_endpoint")); - assertEquals("/index1,index2/_endpoint/suffix1,suffix2", Request.endpoint(new String[]{"index1", "index2"}, + assertEquals("/index1,index2/_endpoint/suffix1,suffix2", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint", new String[]{"suffix1", "suffix2"})); } public void testCreateContentType() { final XContentType xContentType = randomFrom(XContentType.values()); - assertEquals(xContentType.mediaTypeWithoutParameters(), Request.createContentType(xContentType).getMimeType()); + assertEquals(xContentType.mediaTypeWithoutParameters(), RequestConverters.createContentType(xContentType).getMimeType()); } public void testEnforceSameContentType() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b8315bd59fa..5ca9b05f73a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -94,14 +94,7 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.client.RestClientTestUtil.randomHeaders; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.mockito.Matchers.anyMapOf; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.anyVararg; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.isNotNull; -import static 
org.mockito.Matchers.isNull; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -134,31 +127,22 @@ public class RestHighLevelClientTests extends ESTestCase { Header[] headers = randomHeaders(random(), "Header"); Response response = mock(Response.class); when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK)); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenReturn(response); + when(restClient.performRequest(any(Request.class))).thenReturn(response); assertTrue(restHighLevelClient.ping(headers)); - verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()), - isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } public void testPing404NotFound() throws IOException { Header[] headers = randomHeaders(random(), "Header"); Response response = mock(Response.class); when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND)); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenReturn(response); + when(restClient.performRequest(any(Request.class))).thenReturn(response); assertFalse(restHighLevelClient.ping(headers)); - verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()), - isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } public void testPingSocketTimeout() throws IOException { Header[] headers = randomHeaders(random(), "Header"); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(new SocketTimeoutException()); + when(restClient.performRequest(any(Request.class))).thenThrow(new SocketTimeoutException()); expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers)); - verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()), - isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } public void testInfo() throws IOException { @@ -168,8 +152,6 @@ public class RestHighLevelClientTests extends ESTestCase { mockResponse(testInfo); MainResponse receivedInfo = restHighLevelClient.info(headers); assertEquals(testInfo, receivedInfo); - verify(restClient).performRequest(eq(HttpGet.METHOD_NAME), eq("/"), eq(Collections.emptyMap()), - isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } public void testSearchScroll() throws IOException { @@ -185,8 +167,6 @@ public class RestHighLevelClientTests extends ESTestCase { assertEquals(5, searchResponse.getTotalShards()); assertEquals(5, searchResponse.getSuccessfulShards()); assertEquals(100, searchResponse.getTook().getMillis()); - verify(restClient).performRequest(eq(HttpPost.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()), - isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } public void testClearScroll() throws IOException { @@ -198,17 +178,14 @@ public class RestHighLevelClientTests extends ESTestCase { ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers); assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded()); assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed()); - 
verify(restClient).performRequest(eq(HttpDelete.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()), - isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } private void mockResponse(ToXContent toXContent) throws IOException { Response response = mock(Response.class); - ContentType contentType = ContentType.parse(Request.REQUEST_BODY_CONTENT_TYPE.mediaType()); - String requestBody = toXContent(toXContent, Request.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString(); + ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType()); + String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString(); when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType)); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenReturn(response); + when(restClient.performRequest(any(Request.class))).thenReturn(response); } public void testRequestValidation() { @@ -336,13 +313,11 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenReturn(mockResponse); + when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse); { Integer result = restHighLevelClient.performRequest(mainRequest, requestConverter, response -> response.getStatusLine().getStatusCode(), Collections.emptySet()); @@ -358,14 +333,12 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> restHighLevelClient.performRequest(mainRequest, requestConverter, response -> response.getStatusLine().getStatusCode(), Collections.emptySet())); @@ -376,16 +349,14 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = 
request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> restHighLevelClient.performRequest(mainRequest, requestConverter, response -> response.getStatusLine().getStatusCode(), Collections.emptySet())); @@ -396,15 +367,13 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> restHighLevelClient.performRequest(mainRequest, requestConverter, response -> response.getStatusLine().getStatusCode(), Collections.emptySet())); @@ -416,15 +385,13 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); ElasticsearchException elasticsearchException = 
expectThrows(ElasticsearchException.class, () -> restHighLevelClient.performRequest(mainRequest, requestConverter, response -> response.getStatusLine().getStatusCode(), Collections.emptySet())); @@ -436,13 +403,11 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); //although we got an exception, we turn it into a successful response because the status code was provided among ignores assertEquals(Integer.valueOf(404), restHighLevelClient.performRequest(mainRequest, requestConverter, response -> response.getStatusLine().getStatusCode(), Collections.singleton(404))); @@ -450,13 +415,11 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> restHighLevelClient.performRequest(mainRequest, requestConverter, response -> {throw new IllegalStateException();}, Collections.singleton(404))); @@ -467,15 +430,13 @@ public class RestHighLevelClientTests extends ESTestCase { public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); - CheckedFunction requestConverter = request -> - new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); - when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class), - anyObject(), 
anyVararg())).thenThrow(responseException); + when(restClient.performRequest(any(Request.class))).thenThrow(responseException); ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> restHighLevelClient.performRequest(mainRequest, requestConverter, response -> {throw new IllegalStateException();}, Collections.singleton(404))); @@ -696,23 +657,6 @@ public class RestHighLevelClientTests extends ESTestCase { } } - private static class HeadersVarargMatcher extends ArgumentMatcher implements VarargMatcher { - private Header[] expectedHeaders; - - HeadersVarargMatcher(Header... expectedHeaders) { - this.expectedHeaders = expectedHeaders; - } - - @Override - public boolean matches(Object varargArgument) { - if (varargArgument instanceof Header[]) { - Header[] actualHeaders = (Header[]) varargArgument; - return new ArrayEquals(expectedHeaders).matches(actualHeaders); - } - return false; - } - } - private static StatusLine newStatusLine(RestStatus restStatus) { return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); } diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java new file mode 100644 index 00000000000..bf0012339fb --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java @@ -0,0 +1,202 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.unmodifiableMap; + +/** + * HTTP Request to Elasticsearch. + */ +public final class Request { + private static final Header[] NO_HEADERS = new Header[0]; + private final String method; + private final String endpoint; + private final Map parameters = new HashMap<>(); + + private HttpEntity entity; + private Header[] headers = NO_HEADERS; + private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory = + HttpAsyncResponseConsumerFactory.DEFAULT; + + /** + * Create the {@linkplain Request}. + * @param method the HTTP method + * @param endpoint the path of the request (without scheme, host, port, or prefix) + */ + public Request(String method, String endpoint) { + this.method = Objects.requireNonNull(method, "method cannot be null"); + this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null"); + } + + /** + * The HTTP method. + */ + public String getMethod() { + return method; + } + + /** + * The path of the request (without scheme, host, port, or prefix). 
+ */ + public String getEndpoint() { + return endpoint; + } + + /** + * Add a query string parameter. + * @param name the name of the url parameter. Must not be null. + * @param value the value of the url parameter. If {@code null} then + * the parameter is sent as {@code name} rather than {@code name=value} + * @throws IllegalArgumentException if a parameter with that name has + * already been set + */ + public void addParameter(String name, String value) { + Objects.requireNonNull(name, "url parameter name cannot be null"); + // .putIfAbsent(name, value) except we are in Java 7 which doesn't have that. + if (parameters.containsKey(name)) { + throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]"); + } else { + parameters.put(name, value); + } + } + + /** + * Query string parameters. The returned map is an unmodifiable view of the + * map in the request so calls to {@link #addParameter(String, String)} + * will change it. + */ + public Map getParameters() { + return unmodifiableMap(parameters); + } + + /** + * Set the body of the request. If not set or set to {@code null} then no + * body is sent with the request. + */ + public void setEntity(HttpEntity entity) { + this.entity = entity; + } + + /** + * The body of the request. If {@code null} then no body + * is sent with the request. + */ + public HttpEntity getEntity() { + return entity; + } + + /** + * Set the headers to attach to the request. + */ + public void setHeaders(Header... headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + Objects.requireNonNull(header, "header cannot be null"); + } + this.headers = headers; + } + + /** + * Headers to attach to the request. + */ + public Header[] getHeaders() { + return headers; + } + + /** + * Set the {@link HttpAsyncResponseConsumerFactory} used to create one + * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * response body gets streamed from a non-blocking HTTP connection on the + * client side.
+ */ + public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() { + return httpAsyncResponseConsumerFactory; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("Request{"); + b.append("method='").append(method).append('\''); + b.append(", endpoint='").append(endpoint).append('\''); + if (false == parameters.isEmpty()) { + b.append(", params=").append(parameters); + } + if (entity != null) { + b.append(", entity=").append(entity); + } + if (headers.length > 0) { + b.append(", headers="); + for (int h = 0; h < headers.length; h++) { + if (h != 0) { + b.append(','); + } + b.append(headers[h].toString()); + } + } + if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { + b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory); + } + return b.append('}').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || (obj.getClass() != getClass())) { + return false; + } + if (obj == this) { + return true; + } + + Request other = (Request) obj; + return method.equals(other.method) + && endpoint.equals(other.endpoint) + && parameters.equals(other.parameters) + && Objects.equals(entity, other.entity) + && Arrays.equals(headers, other.headers) + && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); + } + + @Override + public int hashCode() { + return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory); + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 48349c38589..1d4036c2103 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -143,6 +143,61 @@ public class RestClient implements Closeable { this.blacklist.clear(); } + /** + * Sends a request to the Elasticsearch cluster that the client points to. + * Blocks until the request is completed and returns its response or fails + * by throwing an exception. Selects a host out of the provided ones in a + * round-robin fashion. Failing hosts are marked dead and retried after a + * certain amount of time (minimum 1 minute, maximum 30 minutes), depending + * on how many times they previously failed (the more failures, the later + * they will be retried). In case of failures all of the alive nodes (or + * dead nodes that deserve a retry) are retried until one responds or none + * of them does, in which case an {@link IOException} will be thrown. + * + * This method works by performing an asynchronous call and waiting + * for the result. If the asynchronous call throws an exception we wrap + * it and rethrow it so that the stack trace attached to the exception + * contains the call site. While we attempt to preserve the original + * exception, this isn't always possible and we likely haven't covered all of + * the cases. You can get the original exception from + * {@link Exception#getCause()}.
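+ *
+ * A minimal blocking call looks like this (an illustrative sketch only:
+ * {@code client} stands for any {@code RestClient} instance and {@code "/"}
+ * for any endpoint):
+ * <pre>
+ * Request request = new Request("GET", "/");
+ * Response response = client.performRequest(request);
+ * </pre>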
+ * + * @param request the request to perform + * @return the response returned by Elasticsearch + * @throws IOException in case of a problem or the connection was aborted + * @throws ClientProtocolException in case of an http protocol error + * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error + */ + public Response performRequest(Request request) throws IOException { + SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis); + performRequestAsyncNoCatch(request, listener); + return listener.get(); + } + + /** + * Sends a request to the Elasticsearch cluster that the client points to. + * The request is executed asynchronously and the provided + * {@link ResponseListener} gets notified upon request completion or + * failure. Selects a host out of the provided ones in a round-robin + * fashion. Failing hosts are marked dead and retried after a certain + * amount of time (minimum 1 minute, maximum 30 minutes), depending on how + * many times they previously failed (the more failures, the later they + * will be retried). In case of failures all of the alive nodes (or dead + * nodes that deserve a retry) are retried until one responds or none of + * them does, in which case an {@link IOException} will be thrown. + * + * @param request the request to perform + * @param responseListener the {@link ResponseListener} to notify when the + * request is completed or fails + */ + public void performRequestAsync(Request request, ResponseListener responseListener) { + try { + performRequestAsyncNoCatch(request, responseListener); + } catch (Exception e) { + responseListener.onFailure(e); + } + } + /** * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters @@ -157,7 +212,9 @@ public class RestClient implements Closeable { * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error */ public Response performRequest(String method, String endpoint, Header... headers) throws IOException { - return performRequest(method, endpoint, Collections.emptyMap(), null, headers); + Request request = new Request(method, endpoint); + request.setHeaders(headers); + return performRequest(request); } /** @@ -174,7 +231,10 @@ public class RestClient implements Closeable { * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error */ public Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { - return performRequest(method, endpoint, params, (HttpEntity)null, headers); + Request request = new Request(method, endpoint); + addParameters(request, params); + request.setHeaders(headers); + return performRequest(request); } /** @@ -195,7 +255,11 @@ public class RestClient implements Closeable { */ public Response performRequest(String method, String endpoint, Map params, HttpEntity entity, Header... 
headers) throws IOException { - return performRequest(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, headers); + Request request = new Request(method, endpoint); + addParameters(request, params); + request.setEntity(entity); + request.setHeaders(headers); + return performRequest(request); } /** @@ -229,10 +293,12 @@ public class RestClient implements Closeable { public Response performRequest(String method, String endpoint, Map params, HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, Header... headers) throws IOException { - SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis); - performRequestAsyncNoCatch(method, endpoint, params, entity, httpAsyncResponseConsumerFactory, - listener, headers); - return listener.get(); + Request request = new Request(method, endpoint); + addParameters(request, params); + request.setEntity(entity); + request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); + request.setHeaders(headers); + return performRequest(request); } /** @@ -246,7 +312,15 @@ public class RestClient implements Closeable { * @param headers the optional request headers */ public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) { - performRequestAsync(method, endpoint, Collections.emptyMap(), null, responseListener, headers); + Request request; + try { + request = new Request(method, endpoint); + request.setHeaders(headers); + } catch (Exception e) { + responseListener.onFailure(e); + return; + } + performRequestAsync(request, responseListener); } /** @@ -262,7 +336,16 @@ public class RestClient implements Closeable { */ public void performRequestAsync(String method, String endpoint, Map params, ResponseListener responseListener, Header... headers) { - performRequestAsync(method, endpoint, params, null, responseListener, headers); + Request request; + try { + request = new Request(method, endpoint); + addParameters(request, params); + request.setHeaders(headers); + } catch (Exception e) { + responseListener.onFailure(e); + return; + } + performRequestAsync(request, responseListener); } /** @@ -281,7 +364,17 @@ public class RestClient implements Closeable { */ public void performRequestAsync(String method, String endpoint, Map params, HttpEntity entity, ResponseListener responseListener, Header... headers) { - performRequestAsync(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, responseListener, headers); + Request request; + try { + request = new Request(method, endpoint); + addParameters(request, params); + request.setEntity(entity); + request.setHeaders(headers); + } catch (Exception e) { + responseListener.onFailure(e); + return; + } + performRequestAsync(request, responseListener); } /** @@ -305,24 +398,27 @@ public class RestClient implements Closeable { public void performRequestAsync(String method, String endpoint, Map params, HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, ResponseListener responseListener, Header... 
headers) { + Request request; try { - performRequestAsyncNoCatch(method, endpoint, params, entity, httpAsyncResponseConsumerFactory, - responseListener, headers); + request = new Request(method, endpoint); + addParameters(request, params); + request.setEntity(entity); + request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); + request.setHeaders(headers); } catch (Exception e) { responseListener.onFailure(e); + return; } + performRequestAsync(request, responseListener); } - void performRequestAsyncNoCatch(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - ResponseListener responseListener, Header... headers) { - Objects.requireNonNull(params, "params must not be null"); - Map requestParams = new HashMap<>(params); + void performRequestAsyncNoCatch(Request request, ResponseListener listener) { + Map requestParams = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es String ignoreString = requestParams.remove("ignore"); Set ignoreErrorCodes; if (ignoreString == null) { - if (HttpHead.METHOD_NAME.equals(method)) { + if (HttpHead.METHOD_NAME.equals(request.getMethod())) { //404 never causes error if returned for a HEAD request ignoreErrorCodes = Collections.singleton(404); } else { @@ -331,7 +427,7 @@ public class RestClient implements Closeable { } else { String[] ignoresArray = ignoreString.split(","); ignoreErrorCodes = new HashSet<>(); - if (HttpHead.METHOD_NAME.equals(method)) { + if (HttpHead.METHOD_NAME.equals(request.getMethod())) { //404 never causes error if returned for a HEAD request ignoreErrorCodes.add(404); } @@ -343,13 +439,13 @@ public class RestClient implements Closeable { } } } - URI uri = buildUri(pathPrefix, endpoint, requestParams); - HttpRequestBase request = createHttpRequest(method, uri, entity); - setHeaders(request, headers); - FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener); + URI uri = buildUri(pathPrefix, request.getEndpoint(), requestParams); + HttpRequestBase httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity()); + setHeaders(httpRequest, request.getHeaders()); + FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, - failureTrackingResponseListener); + performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes, + request.getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener); } private void performRequestAsync(final long startTime, final HostTuple> hostTuple, final HttpRequestBase request, @@ -428,11 +524,9 @@ public class RestClient implements Closeable { } private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { - Objects.requireNonNull(requestHeaders, "request headers must not be null"); // request headers override default headers, so we don't add default headers if they exist as request headers final Set requestNames = new HashSet<>(requestHeaders.length); for (Header requestHeader : requestHeaders) { - Objects.requireNonNull(requestHeader, "request header must not be null"); httpRequest.addHeader(requestHeader); requestNames.add(requestHeader.getName()); } @@ -766,4 +860,15 @@ public class RestClient implements Closeable { this.authCache = 
authCache; } } + + /** + * Add all parameters from a map to a {@link Request}. This only exists + * to support methods that exist for backwards compatibility. + */ + private static void addParameters(Request request, Map parameters) { + Objects.requireNonNull(parameters, "parameters cannot be null"); + for (Map.Entry entry : parameters.entrySet()) { + request.addParameter(entry.getKey(), entry.getValue()); + } + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java new file mode 100644 index 00000000000..98fcf8421ae --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class RequestTests extends RestClientTestCase { + public void testConstructor() { + final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + + try { + new Request(null, endpoint); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("method cannot be null", e.getMessage()); + } + + try { + new Request(method, null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("endpoint cannot be null", e.getMessage()); + } + + final Request request = new Request(method, endpoint); + assertEquals(method, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + + public void testAddParameters() { + final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + int parametersCount = between(1, 3); + final Map parameters = new HashMap<>(parametersCount); + while (parameters.size() < parametersCount) { + parameters.put(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5)); + } + Request request = new Request(method, endpoint); + + try { + request.addParameter(null, "value"); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("url parameter name cannot be null", e.getMessage()); + } + + for (Map.Entry entry : parameters.entrySet()) { + request.addParameter(entry.getKey(), entry.getValue()); + } + assertEquals(parameters, request.getParameters()); + + // Test that 
adding parameters with a null value is ok. + request.addParameter("is_null", null); + parameters.put("is_null", null); + assertEquals(parameters, request.getParameters()); + + // Test that adding a duplicate parameter fails + String firstValue = randomBoolean() ? null : "value"; + request.addParameter("name", firstValue); + try { + request.addParameter("name", randomBoolean() ? firstValue : "second_value"); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals("url parameter [name] has already been set to [" + firstValue + "]", e.getMessage()); + } + } + + public void testSetEntity() { + final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + final HttpEntity entity = + randomBoolean() ? new StringEntity(randomAsciiLettersOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null; + Request request = new Request(method, endpoint); + + request.setEntity(entity); + assertEquals(entity, request.getEntity()); + } + + public void testSetHeaders() { + final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); + final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); + Request request = new Request(method, endpoint); + + try { + request.setHeaders((Header[]) null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("headers cannot be null", e.getMessage()); + } + + try { + request.setHeaders(new Header [] {null}); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("header cannot be null", e.getMessage()); + } + + Header[] headers = new Header[between(0, 5)]; + for (int i = 0; i < headers.length; i++) { + headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + } + request.setHeaders(headers); + assertArrayEquals(headers, request.getHeaders()); + } + + // TODO equals and hashcode + +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 16c192b3977..aa323276404 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -138,7 +138,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom()); Response response; try { - response = restClient.performRequest(method, "/" + statusCode); + response = restClient.performRequest(new Request(method, "/" + statusCode)); } catch(ResponseException responseException) { response = responseException.getResponse(); } @@ -156,7 +156,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { final String method = RestClientTestUtil.randomHttpMethod(getRandom()); //we don't test status codes that are subject to retries as they interfere with hosts being stopped final int statusCode = randomBoolean() ? 
randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom()); - restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() { + restClient.performRequestAsync(new Request(method, "/" + statusCode), new ResponseListener() { @Override public void onSuccess(Response response) { responses.add(new TestResponse(method, statusCode, response)); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 7786eefb97f..2d419b213d6 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -62,6 +62,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -280,13 +281,17 @@ public class RestClientSingleHostTests extends RestClientTestCase { StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON); for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { for (int okStatusCode : getOkStatusCodes()) { - Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.emptyMap(), entity); + Request request = new Request(method, "/" + okStatusCode); + request.setEntity(entity); + Response response = restClient.performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); } for (int errorStatusCode : getAllErrorStatusCodes()) { + Request request = new Request(method, "/" + errorStatusCode); + request.setEntity(entity); try { - restClient.performRequest(method, "/" + errorStatusCode, Collections.emptyMap(), entity); + restClient.performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -297,8 +302,10 @@ public class RestClientSingleHostTests extends RestClientTestCase { } } for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + Request request = new Request(method, "/" + randomStatusCode(getRandom())); + request.setEntity(entity); try { - restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.emptyMap(), entity); + restClient.performRequest(request); fail("request should have failed"); } catch(UnsupportedOperationException e) { assertThat(e.getMessage(), equalTo(method + " with body is not supported")); @@ -306,7 +313,11 @@ public class RestClientSingleHostTests extends RestClientTestCase { } } - public void testNullHeaders() throws IOException { + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + */ + @Deprecated + public void testPerformRequestOldStyleNullHeaders() throws IOException { String method = randomHttpMethod(getRandom()); int statusCode = randomStatusCode(getRandom()); try { @@ -323,20 +334,24 @@ public class RestClientSingleHostTests extends RestClientTestCase { } } - public void testNullParams() throws IOException { + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
+ */ + @Deprecated + public void testPerformRequestOldStyleWithNullParams() throws IOException { String method = randomHttpMethod(getRandom()); int statusCode = randomStatusCode(getRandom()); try { restClient.performRequest(method, "/" + statusCode, (Map)null); fail("request should have failed"); } catch(NullPointerException e) { - assertEquals("params must not be null", e.getMessage()); + assertEquals("parameters cannot be null", e.getMessage()); } try { restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null); fail("request should have failed"); } catch(NullPointerException e) { - assertEquals("params must not be null", e.getMessage()); + assertEquals("parameters cannot be null", e.getMessage()); } } @@ -348,9 +363,11 @@ public class RestClientSingleHostTests extends RestClientTestCase { for (String method : getHttpMethods()) { final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); + Request request = new Request(method, "/" + statusCode); + request.setHeaders(requestHeaders); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, requestHeaders); + esResponse = restClient.performRequest(request); } catch(ResponseException e) { esResponse = e.getResponse(); } @@ -361,16 +378,15 @@ public class RestClientSingleHostTests extends RestClientTestCase { private HttpUriRequest performRandomRequest(String method) throws Exception { String uriAsString = "/" + randomStatusCode(getRandom()); + Request request = new Request(method, uriAsString); URIBuilder uriBuilder = new URIBuilder(uriAsString); - final Map params = new HashMap<>(); - boolean hasParams = randomBoolean(); - if (hasParams) { + if (randomBoolean()) { int numParams = randomIntBetween(1, 3); for (int i = 0; i < numParams; i++) { - String paramKey = "param-" + i; - String paramValue = randomAsciiOfLengthBetween(3, 10); - params.put(paramKey, paramValue); - uriBuilder.addParameter(paramKey, paramValue); + String name = "param-" + i; + String value = randomAsciiAlphanumOfLengthBetween(3, 10); + request.addParameter(name, value); + uriBuilder.addParameter(name, value); } } if (randomBoolean()) { @@ -379,81 +395,82 @@ public class RestClientSingleHostTests extends RestClientTestCase { if (randomBoolean()) { ignore += "," + Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes())); } - params.put("ignore", ignore); + request.addParameter("ignore", ignore); } URI uri = uriBuilder.build(); - HttpUriRequest request; + HttpUriRequest expectedRequest; switch(method) { case "DELETE": - request = new HttpDeleteWithEntity(uri); + expectedRequest = new HttpDeleteWithEntity(uri); break; case "GET": - request = new HttpGetWithEntity(uri); + expectedRequest = new HttpGetWithEntity(uri); break; case "HEAD": - request = new HttpHead(uri); + expectedRequest = new HttpHead(uri); break; case "OPTIONS": - request = new HttpOptions(uri); + expectedRequest = new HttpOptions(uri); break; case "PATCH": - request = new HttpPatch(uri); + expectedRequest = new HttpPatch(uri); break; case "POST": - request = new HttpPost(uri); + expectedRequest = new HttpPost(uri); break; case "PUT": - request = new HttpPut(uri); + expectedRequest = new HttpPut(uri); break; case "TRACE": - request = new HttpTrace(uri); + expectedRequest = new HttpTrace(uri); break; default: throw new UnsupportedOperationException("method not supported: " + method); } - HttpEntity entity = null; - boolean hasBody = request instanceof 
HttpEntityEnclosingRequest && getRandom().nextBoolean(); - if (hasBody) { - entity = new StringEntity(randomAsciiOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); - ((HttpEntityEnclosingRequest) request).setEntity(entity); + if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) { + HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); + ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity); + request.setEntity(entity); } - Header[] headers = new Header[0]; final Set uniqueNames = new HashSet<>(); if (randomBoolean()) { - headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); + Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); + request.setHeaders(headers); for (Header header : headers) { - request.addHeader(header); + expectedRequest.addHeader(header); uniqueNames.add(header.getName()); } } for (Header defaultHeader : defaultHeaders) { // request level headers override default headers if (uniqueNames.contains(defaultHeader.getName()) == false) { - request.addHeader(defaultHeader); + expectedRequest.addHeader(defaultHeader); } } try { - if (hasParams == false && hasBody == false && randomBoolean()) { - restClient.performRequest(method, uriAsString, headers); - } else if (hasBody == false && randomBoolean()) { - restClient.performRequest(method, uriAsString, params, headers); - } else { - restClient.performRequest(method, uriAsString, params, entity, headers); - } + restClient.performRequest(request); } catch(ResponseException e) { //all good } - return request; + return expectedRequest; } + /** + * @deprecated prefer {@link RestClient#performRequest(Request)}. + */ + @Deprecated private Response performRequest(String method, String endpoint, Header... headers) throws IOException { return performRequest(method, endpoint, Collections.emptyMap(), headers); } + /** + * @deprecated prefer {@link RestClient#performRequest(Request)}. + */ + @Deprecated private Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { int methodSelector; if (params.isEmpty()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index ee6dbf449bd..872b327954b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -52,6 +52,30 @@ public class RestClientTests extends RestClientTestCase { } public void testPerformAsyncWithUnsupportedMethod() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + try (RestClient restClient = createRestClient()) { + restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() { + @Override + public void onSuccess(Response response) { + fail("should have failed because of unsupported method"); + } + + @Override + public void onFailure(Exception exception) { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + latch.countDown(); + } + }); + latch.await(); + } + } + + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}. 
+ */ + @Deprecated + public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { @@ -71,7 +95,11 @@ public class RestClientTests extends RestClientTestCase { } } - public void testPerformAsyncWithNullParams() throws Exception { + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. + */ + @Deprecated + public void testPerformOldStyleAsyncWithNullParams() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { @@ -83,7 +111,7 @@ public class RestClientTests extends RestClientTestCase { @Override public void onFailure(Exception exception) { assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("params must not be null", exception.getMessage()); + assertEquals("parameters cannot be null", exception.getMessage()); latch.countDown(); } }); @@ -91,7 +119,11 @@ public class RestClientTests extends RestClientTestCase { } } - public void testPerformAsyncWithNullHeaders() throws Exception { + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + */ + @Deprecated + public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { ResponseListener listener = new ResponseListener() { @@ -103,7 +135,7 @@ public class RestClientTests extends RestClientTestCase { @Override public void onFailure(Exception exception) { assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("request header must not be null", exception.getMessage()); + assertEquals("header cannot be null", exception.getMessage()); latch.countDown(); } }; @@ -113,6 +145,30 @@ public class RestClientTests extends RestClientTestCase { } public void testPerformAsyncWithWrongEndpoint() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + try (RestClient restClient = createRestClient()) { + restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() { + @Override + public void onSuccess(Response response) { + fail("should have failed because of wrong endpoint"); + } + + @Override + public void onFailure(Exception exception) { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + latch.countDown(); + } + }); + latch.await(); + } + } + + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}. + */ + @Deprecated + public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { @@ -175,6 +231,10 @@ public class RestClientTests extends RestClientTestCase { } } + /** + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}.
+ */ + @Deprecated public void testNullPath() throws IOException { try (RestClient restClient = createRestClient()) { for (String method : getHttpMethods()) { @@ -182,7 +242,7 @@ public class RestClientTests extends RestClientTestCase { restClient.performRequest(method, null); fail("path set to null should fail!"); } catch (NullPointerException e) { - assertEquals("path must not be null", e.getMessage()); + assertEquals("endpoint cannot be null", e.getMessage()); } } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 1bad6b5f6d6..aa89a7d76ab 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -27,7 +27,9 @@ import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.CredentialsProvider; import org.apache.http.client.config.RequestConfig; +import org.apache.http.entity.BasicHttpEntity; import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.reactor.IOReactorConfig; @@ -37,6 +39,7 @@ import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseListener; import org.elasticsearch.client.RestClient; @@ -134,107 +137,61 @@ public class RestClientDocumentation { } { - //tag::rest-client-verb-endpoint - Response response = restClient.performRequest("GET", "/"); // <1> - //end::rest-client-verb-endpoint + //tag::rest-client-sync + Request request = new Request( + "GET", // <1> + "/"); // <2> + Response response = restClient.performRequest(request); + //end::rest-client-sync } { - //tag::rest-client-headers - Response response = restClient.performRequest("GET", "/", new BasicHeader("header", "value")); - //end::rest-client-headers - } - { - //tag::rest-client-verb-endpoint-params - Map params = Collections.singletonMap("pretty", "true"); - Response response = restClient.performRequest("GET", "/", params); // <1> - //end::rest-client-verb-endpoint-params - } - { - //tag::rest-client-verb-endpoint-params-body - Map params = Collections.emptyMap(); - String jsonString = "{" + - "\"user\":\"kimchy\"," + - "\"postDate\":\"2013-01-30\"," + - "\"message\":\"trying out Elasticsearch\"" + - "}"; - HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON); - Response response = restClient.performRequest("PUT", "/posts/doc/1", params, entity); // <1> - //end::rest-client-verb-endpoint-params-body - } - { - //tag::rest-client-response-consumer - Map params = Collections.emptyMap(); - HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory = - new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024); - Response response = restClient.performRequest("GET", "/posts/_search", params, null, consumerFactory); // <1> - //end::rest-client-response-consumer - } - { - //tag::rest-client-verb-endpoint-async - ResponseListener 
responseListener = new ResponseListener() { + //tag::rest-client-async + Request request = new Request( + "GET", // <1> + "/"); // <2> + restClient.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { - // <1> + // <3> } @Override public void onFailure(Exception exception) { - // <2> + // <4> } - }; - restClient.performRequestAsync("GET", "/", responseListener); // <3> - //end::rest-client-verb-endpoint-async - - //tag::rest-client-headers-async - Header[] headers = { - new BasicHeader("header1", "value1"), - new BasicHeader("header2", "value2") - }; - restClient.performRequestAsync("GET", "/", responseListener, headers); - //end::rest-client-headers-async - - //tag::rest-client-verb-endpoint-params-async - Map params = Collections.singletonMap("pretty", "true"); - restClient.performRequestAsync("GET", "/", params, responseListener); // <1> - //end::rest-client-verb-endpoint-params-async - - //tag::rest-client-verb-endpoint-params-body-async - String jsonString = "{" + - "\"user\":\"kimchy\"," + - "\"postDate\":\"2013-01-30\"," + - "\"message\":\"trying out Elasticsearch\"" + - "}"; - HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON); - restClient.performRequestAsync("PUT", "/posts/doc/1", params, entity, responseListener); // <1> - //end::rest-client-verb-endpoint-params-body-async - - //tag::rest-client-response-consumer-async - HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory = - new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024); - restClient.performRequestAsync("GET", "/posts/_search", params, null, consumerFactory, responseListener); // <1> - //end::rest-client-response-consumer-async + }); + //end::rest-client-async } { - //tag::rest-client-response2 - Response response = restClient.performRequest("GET", "/"); - RequestLine requestLine = response.getRequestLine(); // <1> - HttpHost host = response.getHost(); // <2> - int statusCode = response.getStatusLine().getStatusCode(); // <3> - Header[] headers = response.getHeaders(); // <4> - String responseBody = EntityUtils.toString(response.getEntity()); // <5> - //end::rest-client-response2 + Request request = new Request("GET", "/"); + //tag::rest-client-parameters + request.addParameter("pretty", "true"); + //end::rest-client-parameters + //tag::rest-client-body + request.setEntity(new StringEntity( + "{\"json\":\"text\"}", + ContentType.APPLICATION_JSON)); + //end::rest-client-body + //tag::rest-client-headers + request.setHeaders( + new BasicHeader("Accept", "text/plain"), + new BasicHeader("Cache-Control", "no-cache")); + //end::rest-client-headers + //tag::rest-client-response-consumer + request.setHttpAsyncResponseConsumerFactory( + new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); + //end::rest-client-response-consumer } { HttpEntity[] documents = new HttpEntity[10]; //tag::rest-client-async-example final CountDownLatch latch = new CountDownLatch(documents.length); for (int i = 0; i < documents.length; i++) { + Request request = new Request("PUT", "/posts/doc/" + i); + //let's assume that the documents are stored in an HttpEntity array + request.setEntity(documents[i]); restClient.performRequestAsync( - "PUT", - "/posts/doc/" + i, - Collections.emptyMap(), - //let's assume that the documents are stored in an HttpEntity array - documents[i], + request, new ResponseListener() { @Override public void onSuccess(Response response) { @@ 
-253,7 +210,16 @@ public class RestClientDocumentation { latch.await(); //end::rest-client-async-example } - + { + //tag::rest-client-response2 + Response response = restClient.performRequest("GET", "/"); + RequestLine requestLine = response.getRequestLine(); // <1> + HttpHost host = response.getHost(); // <2> + int statusCode = response.getStatusLine().getStatusCode(); // <3> + Header[] headers = response.getHeaders(); // <4> + String responseBody = EntityUtils.toString(response.getEntity()); // <5> + //end::rest-client-response2 + } } @SuppressWarnings("unused") diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index c774b023254..c35f9494916 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -65,11 +65,18 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea <> ({pull}30255[#30255]) +Added new "Request" object flavored request methods. Prefer these instead of the +multi-argument versions. ({pull}29623[#29623]) + + [float] === Bug Fixes Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) +=== Regressions + +=== Known Issues //[float] //=== Regressions diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index db27b886b41..5ffc4332a76 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -218,93 +218,74 @@ http://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/ [[java-rest-low-usage-requests]] === Performing requests -Once the `RestClient` has been created, requests can be sent by calling one of -the available `performRequest` or `performRequestAsync` method variants. -The `performRequest` methods are synchronous and return the `Response` directly, -meaning that the client will block and wait for a response to be returned. -The `performRequestAsync` variants return `void` and accept an extra -`ResponseListener` as an argument instead, meaning that they are executed -asynchronously. The provided listener will be notified upon request completion -or failure. +Once the `RestClient` has been created, requests can be sent by calling either +`performRequest` or `performRequestAsync`. `performRequest` is synchronous and +will block the calling thread and return the `Response` when the request is +successful or throw an exception if it fails. `performRequestAsync` is +asynchronous and accepts a `ResponseListener` argument that it calls with a +`Response` when the request is successful or with an `Exception` if it fails.
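+
+For orientation, here is a compact sketch of both flavors (condensed from the
+tagged snippets below; the `"/"` endpoint and the empty handlers are
+placeholders):
+
+["source","java"]
+--------------------------------------------------
+Request request = new Request("GET", "/");
+Response syncResponse = restClient.performRequest(request);      // blocks until the response arrives
+restClient.performRequestAsync(request, new ResponseListener() { // returns immediately
+    @Override
+    public void onSuccess(Response response) { /* handle the response */ }
+
+    @Override
+    public void onFailure(Exception exception) { /* handle the failure */ }
+});
+--------------------------------------------------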
+ +This is synchronous: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-sync] -------------------------------------------------- -<1> Send a request by providing only the verb and the endpoint, minimum set -of required arguments +<1> The HTTP method (`GET`, `POST`, `HEAD`, etc) +<2> The endpoint on the server + +And this is asynchronous: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async] -------------------------------------------------- -<1> Send a request by providing the verb, the endpoint, and some querystring -parameter +<1> The HTTP method (`GET`, `POST`, `HEAD`, etc) +<2> The endpoint on the server +<3> Handle the response +<4> Handle the failure + +You can add request parameters to the request object: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-body] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-parameters] +-------------------------------------------------- + +You can set the body of the request to any `HttpEntity`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body] -------------------------------------------------- -<1> Send a request by providing the verb, the endpoint, optional querystring -parameters and the request body enclosed in an `org.apache.http.HttpEntity` -object IMPORTANT: The `ContentType` specified for the `HttpEntity` is important because it will be used to set the `Content-Type` header so that Elasticsearch can properly parse the content. +And you can set a list of headers to send with the request: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers] +-------------------------------------------------- + +You can also customize the response consumer used to buffer the asynchronous +responses. The default consumer will buffer up to 100MB of response on the +JVM heap. If the response is larger, the request will fail. You could, +for example, lower the maximum size which might be useful if you are running +in a heap-constrained environment: + ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer] -------------------------------------------------- -<1> Send a request by providing the verb, the endpoint, optional querystring -parameters, optional request body and the optional factory that is used to -create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`] -callback instance per request attempt. Controls how the response body gets -streamed from a non-blocking HTTP connection on the client side.
When not -provided, the default implementation is used which buffers the whole response -body in heap memory, up to 100 MB. -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-async] --------------------------------------------------- -<1> Define what needs to happen when the request is successfully performed -<2> Define what needs to happen when the request fails, meaning whenever -there's a connection error or a response with error status code is returned. -<3> Send an async request by providing only the verb, the endpoint, and the -response listener to be notified once the request is completed, minimum set -of required arguments +==== Multiple parallel asynchronous actions -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-async] --------------------------------------------------- -<1> Send an async request by providing the verb, the endpoint, some querystring -parameter and the response listener to be notified once the request is completed - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-body-async] --------------------------------------------------- -<1> Send an async request by providing the verb, the endpoint, optional -querystring parameters, the request body enclosed in an -`org.apache.http.HttpEntity` object and the response listener to be -notified once the request is completed - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer-async] --------------------------------------------------- -<1> Send an async request by providing the verb, the endpoint, optional -querystring parameters, optional request body and the optional factory that is -used to create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`] -callback instance per request attempt. Controls how the response body gets -streamed from a non-blocking HTTP connection on the client side. When not -provided, the default implementation is used which buffers the whole response -body in heap memory, up to 100 MB. - -The following is a basic example of how async requests can be sent: +The client is quite happy to execute many actions in parallel. The following +example indexes many documents in parallel. In a real world scenario you'd +probably want to use the `_bulk` API instead, but the example is illustrative.
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -314,19 +295,6 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async-examp <2> Handle the returned exception, due to communication error or a response with status code that indicates an error -Each of the above listed method supports sending headers along with the -request through a `Header` varargs argument as in the following examples: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers] --------------------------------------------------- - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers-async] --------------------------------------------------- - [[java-rest-low-usage-responses]] === Reading responses @@ -396,4 +364,3 @@ still yields the same response as it did. Enable trace logging for the `tracer` package to have such log lines printed out. Do note that this type of logging is expensive and should not be enabled at all times in production environments, but rather temporarily used only when needed. - diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index 3f6f4bcbc4f..063a0ad31f3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -79,7 +79,11 @@ final class RemoteRequestBuilders { } params.put("size", Integer.toString(searchRequest.source().size())); if (searchRequest.source().version() == null || searchRequest.source().version() == true) { - // false is the only value that makes it false. Null defaults to true.... + /* + * Passing `null` here just add the `version` request parameter + * without any value. This way of requesting the version works + * for all supported versions of Elasticsearch. + */ params.put("version", null); } if (searchRequest.source().sorts() != null) { From 1a0838bd0b19dd83b5511115afafb3c514010f5b Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 11:53:11 -0700 Subject: [PATCH 58/68] [DOCS] Removes redundant SAML realm settings (#30196) --- .../authentication/saml-realm.asciidoc | 229 +----------------- .../en/settings/security-settings.asciidoc | 211 ++++++++++------ 2 files changed, 140 insertions(+), 300 deletions(-) diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc index 4de8d5a28ce..bbf7d597b30 100644 --- a/x-pack/docs/en/security/authentication/saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc @@ -25,238 +25,19 @@ for SAML realms. [[saml-settings]] ==== SAML Realm Settings -[cols="4,^3,10"] -|======================= -| Setting | Required | Description -| `type` | yes | Indicates the realm type. Must be set to `saml`. -| `order` | no | Indicates the priority of this realm within the realm chain. - Realms with a lower order are consulted first. Although not - required, we recommend explicitly setting this value when - you configure multiple realms. Defaults to `Integer.MAX_VALUE`. 
-| `enabled` | no | Indicates whether this realm is enabled or disabled. Enables - you to disable a realm without removing its configuration. - Defaults to `true`. -| `idp.entity_id` | yes | The Entity ID of the SAML Identity Provider. An Entity ID is - a URI with a maximum length of 1024 characters. It can be a - URL (`https://idp.example.com/`) or a URN (`urn:example.com:idp`) - and can be found in the configuration or the SAML metadata - of the Identity Provider. -| `idp.metadata.path` | yes | The path (_recommended_) or URL to a SAML 2.0 metadata file - describing the capabilities and configuration of the Identity - Provider. - If a path is provided, then it is resolved relative to the - {es} config directory. - If a URL is provided, then it must be either a `file` URL or - a `https` URL. - {security} will automatically poll this metadata resource and - will reload the IdP configuration when changes are detected. - File based resources are polled at a frequency determined by - the global {es} `resource.reload.interval.high` setting, which - defaults to 5 seconds. - HTTPS resources are polled at a frequency determined by - the realm's `idp.metadata.http.refresh` setting. -| `idp.metadata.http.refresh` | no | Controls the frequency with which `https` metadata is checked - for changes. Defaults to 1 hour. -| `idp.use_single_logout` | no | Indicates whether to utilise the Identity Provider's Single - Logout service (if one exists in the IdP metadata file). - Defaults to `true`. -| `sp.entity_id` | yes | The Entity ID to use for this SAML Service Provider. - This should be entered as a URI. We recommend that you use the - base URL of your {kib} instance, - e.g. `https://kibana.example.com/` -| `sp.acs` | yes | The URL of the Assertion Consumer Service within {kib}. - Typically this will be the "api/security/v1/saml" endpoint of - your {kib} server, - e.g. `https://kibana.example.com/api/security/v1/saml` -| `sp.logout` | no | The URL of the Single Logout service within {kib}. - Typically this will be the "logout" endpoint of - your {kib} server, - e.g. `https://kibana.example.com/logout` -| `attributes.principal` | yes | The Name of the SAML attribute that should be used as the - {security} user's principal (username) -| `attributes.groups` | no | The Name of the SAML attribute that should be used to populate - {security} user's groups -| `attributes.name` | no | The Name of the SAML attribute that should be used to populate - {security} user's full name -| `attributes.mail` | no | The Name of the SAML attribute that should be used to populate - {security} user's email address -| `attributes.dn` | no | The Name of the SAML attribute that should be used to populate - {security} user's X.500 _Distinguished Name_ -| `attribute_patterns.principal` | no | A java regular expression that is matched against the SAML attribute - specified by `attributes.pattern` before it is applied to the user's - _principal_ property. - The attribute value must match the pattern, and the value of the - first _capturing group_ is used as the principal. - e.g. `^([^@]+)@example\\.com$` matches email addresses from the - "example.com" domain and uses the local-part as the principal. -| `attribute_patterns.groups` | no | As per `attribute_patterns.principal`, but for the _group_ property. -| `attribute_patterns.name` | no | As per `attribute_patterns.principal`, but for the _name_ property. -| `attribute_patterns.mail` | no | As per `attribute_patterns.principal`, but for the _mail_ property. 
-| `attribute_patterns.dn` | no | As per `attribute_patterns.principal`, but for the _dn_ property. -| `nameid_format` | no | The NameID format that should be requested when asking the IdP - to authenticate the current user. - Defaults to requesting _transient_ names - (`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`) -| `nameid.allow_create` | no | The value of the `AllowCreate` attribute of the `NameIdPolicy` - element in an authentication request. - Defaults to `false` -| `nameid.sp_qualifier` | no | The value of the `SPNameQualifier` attribute of the `NameIdPolicy` - element in an authentication request. - The default is to not include the `SPNameQualifier` attribute. -| `force_authn` | no | Whether to set the `ForceAuthn` attribute when requesting that the - IdP authenticate the current user. If this is set to `true`, the - IdP will be required to freshly establish the user's identity, - irrespective of any exiting sessions they may have. - Defaults to `false`. -| `populate_user_metadata` | no | Whether to populate the {es} user's metadata with the values that - are provided by the SAML attributes. Defaults to `true`. -| `allowed_clock_skew` | no | The maximum amount of skew that can be tolerated between the - IdP's clock and the {es} node's clock. Defaults to 3 minutes. -|======================= +See {ref}/security-settings.html#ref-saml-settings[SAML Realm Settings]. + ===== SAML Realm Signing Settings -If a signing key is configured (i.e. is one of `signing.key` or `signing.keystore.path` has been set), then -{security} will sign outgoing SAML messages. Signing can be configured using the following settings. +See {ref}/security-settings.html#ref-saml-signing-settings[SAML Realm Signing Settings]. -|======================= -| Setting | Required | Description -| `signing.saml_messages` | no | A list of SAML message types that should be signed, or `*` to - sign all messages. Each element in the list should be the - local name of a SAML XML Element. Supported element types are - `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. - Defaults to `*`. -| `signing.key` | no | Specifies the path to the PEM encoded private key to use for - SAML message signing. - `signing.key` and `signing.keystore.path` may not be used at - the same time. -| `signing.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure]) - Specifies the passphrase to decrypt the PEM encoded private key if - it is encrypted. -| `signing.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate - chain) that corresponds to the `signing.key`. This certificate - must also be included in the Service Provider metadata, or - manually configured within the IdP to allow for signature - validation. - May only be used if `signing.key` is set. -| `signing.keystore.path` | no | The path to the keystore that contains a private key and - certificate. - Must be either a Java Keystore (jks) or a PKCS#12 file. - `signing.key` and `signing.keystore.path` may not be used at the - same time. -| `signing.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12". - Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or - "pkcs12", otherwise uses "jks" -| `signing.keystore.alias` | no | Specifies the alias of the key within the keystore that should be - used for SAML message signing. Must be specified if the keystore - contains more than one private key. -| `signing.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore. 
-| `signing.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure]) - The password for the key in the keystore. - Defaults to the keystore password. -|======================= ===== SAML Realm Encryption Settings -If an encryption key is configured (i.e. is one of `encryption.key` or -`encryption.keystore.path` has been set), then {security} will publish -an encryption certificate when generating metadata, and will attempt to -decrypt incoming SAML content. -Encryption can be configured using the following settings. - -|======================= -| Setting | Required | Description -| `encryption.key` | no | Specifies the path to the PEM encoded private key to use for - SAML message descryption. - `encryption.key` and `encryption.keystore.path` may not be used at - the same time. -| `encryption.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure]) - Specifies the passphrase to decrypt the PEM encoded private key if - it is encrypted. -| `encryption.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate - chain) that is associated with the `encryption.key`. This - certificate must also be included in the Service Provider metadata, - or manually configured within the IdP to enable message encryption. - May only be used if `encryption.key` is set. -| `encryption.keystore.path` | no | The path to the keystore that contains a private key and - certificate. - Must be either a Java Keystore (jks) or a PKCS#12 file. - `encryption.key` and `encryption.keystore.path` may not be used at - the same time. -| `encryption.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12". - Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or - "pkcs12", otherwise uses "jks" -| `encryption.keystore.alias` | no | Specifies the alias of the key within the keystore that should be - used for SAML message decryption. If not specified, all compatible - key pairs from the keystore will be considered as candidate keys - for decryption. -| `encryption.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore. -| `encryption.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure]) - The password for the key in the keystore. Only a single password is - supported. If you are using multiple decryption keys, then they - cannot have individual passwords. -|======================= +See {ref}/security-settings.html#ref-saml-encryption-settings[SAML Realm Encryption Settings]. ===== SAML Realm SSL Settings -If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` is a URL using the `https` protocol) -Then the following settings may be used to configure SSL. If these are not specified, then the {xpack} -{ref}/security-settings.html#ssl-tls-settings[default SSL settings] are used. - -These settings are not used for any purpose other than loading metadata over https. - -|======================= -| Setting | Required | Description -| `ssl.key` | no | Specifies the path to the PEM encoded private key to use for http - client authentication. - `ssl.key` and `ssl.keystore.path` may not be used at the same time. -| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if - it is encrypted. May not be used with `ssl.secure_key_passphrase` -| `ssl.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure]) - Specifies the passphrase to decrypt the PEM encoded private key if - it is encrypted. 
May not be used with `ssl.key_passphrase` -| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate - chain) that goes with the key. May only be used if `ssl.key` is set. -| `ssl.certificate_authorities` | no | Specifies the paths to the PEM encoded certificate authority - certificates that should be trusted. - `ssl.certificate_authorities` and `ssl.truststore.path` may not be - used at the same time. -| `ssl.keystore.path` | no | The path to the keystore that contains a private key and - certificate. - Must be either a Java Keystore (jks) or a PKCS#12 file. - `ssl.key` and `ssl.keystore.path` may not be used at the same time. -| `ssl.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12". - Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or - "pkcs12", otherwise uses "jks" -| `ssl.keystore.password` | no | The password to the keystore. - May not be used with `ssl.keystore.secure_password`. -| `ssl.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore. - May not be used with `ssl.keystore.password`. -| `ssl.keystore.key_password` | no | The password for the key in the keystore. - Defaults to the keystore password. - May not be used with `ssl.keystore.secure_key_password`. -| `ssl.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure]) - The password for the key in the keystore. - Defaults to the keystore password. - May not be used with `ssl.keystore.key_password`. -| `ssl.truststore.path` | no | The path to the keystore that contains the certificates to trust. - Must be either a Java Keystore (jks) or a PKCS#12 file. - `ssl.certificate_authorities` and `ssl.truststore.path` may not be - used at the same time. -| `ssl.truststore.type` | no | The type of the truststore. Must be one of "jks" or "PKCS12". - Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or - "pkcs12", otherwise uses "jks" -| `ssl.truststore.password` | no | The password to the truststore. - May not be used with `ssl.truststore.secure_password`. -| `ssl.truststore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the truststore. - May not be used with `ssl.truststore.password`. -| `ssl.verification_mode` | no | One of `full` (verify the hostname and the certicate path), - `certificate` (verify the certificate path, but not the hostname) - or `none` (perform no verification). Defaults to `full`. -+ - See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`] - for a more detailed explanation of these values. -| `ssl.supported_protocols` | no | Specifies the supported protocols for TLS/SSL. -| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported. -|======================= +See {ref}/security-settings.html#ref-saml-ssl-settings[SAML Realm SSL Settings]. diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc index ba281b5ccb4..963d5a5f002 100644 --- a/x-pack/docs/en/settings/security-settings.asciidoc +++ b/x-pack/docs/en/settings/security-settings.asciidoc @@ -689,6 +689,10 @@ LDAP operation (such as `search`). Defaults to `true`. [float] ===== PKI realm settings +The `type` setting must be set to `pki`. In addition to the +<>, you can specify +the following settings: + `username_pattern`:: The regular expression pattern used to extract the username from the certificate DN. The first match group is the used as the username. 
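As a plain-Java illustration of that first-capture-group rule — this is not
{security} code, and the DN below is an invented example — a pattern in the
style of the default `CN=(.*?)(?:,|$)` extracts the username like so:

["source","java"]
--------------------------------------------------
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class UsernamePatternSketch {
    public static void main(String[] args) {
        // Default-style pattern: capture the CN component of the certificate DN
        Pattern usernamePattern = Pattern.compile("CN=(.*?)(?:,|$)");
        Matcher matcher = usernamePattern.matcher("CN=John Doe,OU=example,O=com");
        if (matcher.find()) {
            System.out.println(matcher.group(1)); // prints "John Doe" - the first match group
        }
    }
}
--------------------------------------------------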
@@ -735,8 +739,16 @@ Defaults to `100000`. [[ref-saml-settings]] [float] ===== SAML realm settings + +The `type` setting must be set to `saml`. In addition to the +<>, you can specify +the following settings: + `idp.entity_id`:: -The Entity ID of the SAML Identity Provider +The Entity ID of the SAML Identity Provider. An Entity ID is a URI with a +maximum length of 1024 characters. It can be a URL (https://idp.example.com/) or +a URN (`urn:example.com:idp`) and can be found in the configuration or the SAML +metadata of the Identity Provider. `idp.metadata.path`:: The path _(recommended)_ or URL to a SAML 2.0 metadata file describing the @@ -744,7 +756,7 @@ capabilities and configuration of the Identity Provider. If a path is provided, then it is resolved relative to the {es} config directory. If a URL is provided, then it must be either a `file` URL or a `https` URL. -{security} will automatically poll this metadata resource and will reload +{security} automatically polls this metadata resource and reloads the IdP configuration when changes are detected. File based resources are polled at a frequency determined by the global {es} `resource.reload.interval.high` setting, which defaults to 5 seconds. @@ -761,39 +773,47 @@ Indicates whether to utilise the Identity Provider's Single Logout service Defaults to `true`. `sp.entity_id`:: -The Entity ID to use for this SAML Service Provider, entered as a URI. +The Entity ID to use for this SAML Service Provider. This should be entered as a +URI. We recommend that you use the base URL of your Kibana instance. For example, +`https://kibana.example.com/`. `sp.acs`:: -The URL of the Assertion Consumer Service within {kib}. +The URL of the Assertion Consumer Service within {kib}. Typically this is the +"api/security/v1/saml" endpoint of your Kibana server. For example, +`https://kibana.example.com/api/security/v1/saml`. `sp.logout`:: -The URL of the Single Logout service within {kib}. +The URL of the Single Logout service within {kib}. Typically this is the +"logout" endpoint of your Kibana server. For example, +`https://kibana.example.com/logout`. `attributes.principal`:: The Name of the SAML attribute that should be used as the {security} user's -principal (username) +principal (username). `attributes.groups`:: The Name of the SAML attribute that should be used to populate {security} -user's groups +user's groups. `attributes.name`:: The Name of the SAML attribute that should be used to populate {security} -user's full name +user's full name. `attributes.mail`:: The Name of the SAML attribute that should be used to populate {security} -user's email address +user's email address. `attributes.dn`:: The Name of the SAML attribute that should be used to populate {security} -user's X.500 _Distinguished Name_ +user's X.500 _Distinguished Name_. `attribute_patterns.principal`:: -A java regular expression that is matched against the SAML attribute specified +A Java regular expression that is matched against the SAML attribute specified by `attributes.pattern` before it is applied to the user's _principal_ property. -The attribute value must match the pattern, and the value of the first -_capturing group_ is used as the principal. +The attribute value must match the pattern and the value of the first +_capturing group_ is used as the principal. For example, `^([^@]+)@example\\.com$` +matches email addresses from the "example.com" domain and uses the local-part as +the principal. 
`attribute_patterns.groups`:: As per `attribute_patterns.principal`, but for the _group_ property. @@ -809,26 +829,41 @@ As per `attribute_patterns.principal`, but for the _dn_ property. `nameid_format`:: The NameID format that should be requested when asking the IdP to authenticate -the current user. -Defaults to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` +the current user. Defaults to requesting _transient_ names +(`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`). + +`nameid.allow_create`:: The value of the `AllowCreate` attribute of the +`NameIdPolicy` element in an authentication request. Defaults to `false`. + +`nameid.sp_qualifier`:: The value of the `SPNameQualifier` attribute of the +`NameIdPolicy` element in an authentication request. The default is to not +include the `SPNameQualifier` attribute. `force_authn`:: -Whether to set the `ForceAuthn` attribute when requesting that the IdP -authenticate the current user. +Specifies whether to set the `ForceAuthn` attribute when requesting that the IdP +authenticate the current user. If set to `true`, the IdP is required to verify +the user’s identity, irrespective of any existing sessions they might have. Defaults to `false`. `populate_user_metadata`:: -Whether to populate the {es} user's metadata with the values that are provided -by the SAML attributes. -Defaults to `true`. +Specifies whether to populate the {es} user's metadata with the values that are +provided by the SAML attributes. Defaults to `true`. `allowed_clock_skew`:: The maximum amount of skew that can be tolerated between the IdP's clock and the {es} node's clock. Defaults to `3m` (3 minutes). +[float] +[[ref-saml-signing-settings]] +===== SAML realm signing settings + +If a signing key is configured (that is, either `signing.key` or +`signing.keystore.path` is set), then {security} signs outgoing SAML messages. +Signing can be configured using the following settings: + `signing.saml_messages`:: -A list of SAML message types that should be signed, or `*` to sign all messages. +A list of SAML message types that should be signed or `*` to sign all messages. Each element in the list should be the local name of a SAML XML Element. Supported element types are `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. Only valid if `signing.key` or `signing.keystore.path` is also specified. @@ -836,152 +871,177 @@ Defaults to `*`. `signing.key`:: Specifies the path to the PEM encoded private key to use for SAML message signing. -`signing.key` and `signing.keystore.path` may not be used at the same time. +`signing.key` and `signing.keystore.path` cannot be used at the same time. `signing.secure_key_passphrase` (<>):: Specifies the passphrase to decrypt the PEM encoded private key (`signing.key`) if it is encrypted. `signing.certificate`:: -Specifies the path to the PEM encoded certificate that corresponds to the -`signing.key`. May only be used if `signing.key` is set. +Specifies the path to the PEM encoded certificate (or certificate chain) that +corresponds to the `signing.key`. This certificate must also be included in the +Service Provider metadata or manually configured within the IdP to allow for +signature validation. This setting can only be used if `signing.key` is set. `signing.keystore.path`:: The path to the keystore that contains a private key and certificate. Must be either a Java Keystore (jks) or a PKCS#12 file. -`signing.key` and `signing.keystore.path` may not be used at the same time. 
+`signing.key` and `signing.keystore.path` cannot be used at the same time. `signing.keystore.type`:: -The type of the keystore (`signing.keystore.path`). -Must be one of "jks" or "PKCS12". Defaults to "PKCS12" if the keystore path -ends in ".p12", ".pfx" or "pkcs12", otherwise uses "jks". +The type of the keystore in `signing.keystore.path`. +Must be either `jks` or `PKCS12`. If the keystore path ends in ".p12", ".pfx", +or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. `signing.keystore.alias`:: Specifies the alias of the key within the keystore that should be -used for SAML message signing. Must be specified if the keystore -contains more than one private key. +used for SAML message signing. If the keystore contains more than one private +key, this setting must be specified. `signing.keystore.secure_password` (<>):: -The password to the keystore (`signing.keystore.path`). +The password to the keystore in `signing.keystore.path`. `signing.keystore.secure_key_password` (<>):: The password for the key in the keystore (`signing.keystore.path`). Defaults to the keystore password. +[float] +[[ref-saml-encryption-settings]] +===== SAML realm encryption settings + +If an encryption key is configured (that is, either `encryption.key` or +`encryption.keystore.path` is set), then {security} publishes an encryption +certificate when generating metadata and attempts to decrypt incoming SAML +content. Encryption can be configured using the following settings: + `encryption.key`:: Specifies the path to the PEM encoded private key to use for SAML message decryption. -`encryption.key` and `encryption.keystore.path` may not be used at the same time. +`encryption.key` and `encryption.keystore.path` cannot be used at the same time. `encryption.secure_key_passphrase` (<>):: Specifies the passphrase to decrypt the PEM encoded private key (`encryption.key`) if it is encrypted. `encryption.certificate`:: -Specifies the path to the PEM encoded certificate chain that is associated with -the `encryption.key`. May only be used if `encryption.key` is set. +Specifies the path to the PEM encoded certificate (or certificate chain) that is +associated with the `encryption.key`. This certificate must also be included in +the Service Provider metadata or manually configured within the IdP to enable +message encryption. This setting can be used only if `encryption.key` is set. `encryption.keystore.path`:: The path to the keystore that contains a private key and certificate. Must be either a Java Keystore (jks) or a PKCS#12 file. -`encryption.key` and `encryption.keystore.path` may not be used at the same time. +`encryption.key` and `encryption.keystore.path` cannot be used at the same time. `encryption.keystore.type`:: The type of the keystore (`encryption.keystore.path`). -Must be one of "jks" or "PKCS12". Defaults to "PKCS12" if the keystore path -ends in ".p12", ".pfx" or "pkcs12", otherwise uses "jks". +Must be either `jks` or `PKCS12`. If the keystore path ends in ".p12", ".pfx", +or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. `encryption.keystore.alias`:: Specifies the alias of the key within the keystore (`encryption.keystore.path`) that should be used for SAML message decryption. If not specified, all compatible -key pairs from the keystore will be considered as candidate keys for decryption. +key pairs from the keystore are considered as candidate keys for decryption. 
`encryption.keystore.secure_password` (<>):: The password to the keystore (`encryption.keystore.path`). `encryption.keystore.secure_key_password` (<>):: The password for the key in the keystore (`encryption.keystore.path`). Only a -single password is supported. If you are using multiple decryption keys, then +single password is supported. If you are using multiple decryption keys, they cannot have individual passwords. +[float] +[[ref-saml-ssl-settings]] +===== SAML realm SSL settings + +If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` +is a URL using the `https` protocol), the following settings can be used to +configure SSL. If these are not specified, then the +<> are used. + +NOTE: These settings are not used for any purpose other than loading metadata +over https. + `ssl.key`:: -If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the -path to the PEM encoded private key to use for http client authentication (if -required). `ssl.key` and `ssl.keystore.path` may not be used at the same time. +Specifies the path to the PEM encoded private key to use for http client +authentication (if required). `ssl.key` and `ssl.keystore.path` cannot be used +at the same time. `ssl.key_passphrase`:: -If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the +Specifies the passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is -encrypted. May not be used with `ssl.secure_key_passphrase` +encrypted. Cannot be used with `ssl.secure_key_passphrase`. `ssl.secure_key_passphrase` (<>):: -If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the +Specifies the passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is -encrypted. May not be used with `ssl.key_passphrase` +encrypted. Cannot be used with `ssl.key_passphrase`. `ssl.certificate`:: -If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the +Specifies the path to the PEM encoded certificate (or certificate chain) that is associated -with the key (`ssl.key`). May only be used if `ssl.key` is set. +with the key (`ssl.key`). This setting can be used only if `ssl.key` is set. `ssl.certificate_authorities`:: -If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the +Specifies the paths to the PEM encoded certificate authority certificates that should be -trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be +trusted. `ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the same time. `ssl.keystore.path`:: -If retrieving IDP metadata via https (see `idp.metadata.path`), the path to +Specifies the path to the keystore that contains a private key and certificate. Must be either a Java Keystore (jks) or a PKCS#12 file. -`ssl.key` and `ssl.keystore.path` may not be used at the same time. +`ssl.key` and `ssl.keystore.path` cannot be used at the same time. `ssl.keystore.type`:: -The type of the keystore (`ssl.keystore.path`). Must be one of "jks" or "PKCS12". -Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or "pkcs12", -otherwise uses "jks" +The type of the keystore (`ssl.keystore.path`). Must be either `jks` or `PKCS12`. +If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults +to `PKCS12`. Otherwise, it defaults to `jks`. `ssl.keystore.password`:: -The password to the keystore (`ssl.keystore.path`). -May not be used with `ssl.keystore.secure_password`. +The password to the keystore (`ssl.keystore.path`). 
This setting cannot be used
+with `ssl.keystore.secure_password`.

`ssl.keystore.secure_password` (<>)::
The password to the keystore (`ssl.keystore.path`).
-May not be used with `ssl.keystore.password`.
+This setting cannot be used with `ssl.keystore.password`.

`ssl.keystore.key_password`::
The password for the key in the keystore (`ssl.keystore.path`).
-Defaults to the keystore password.
-May not be used with `ssl.keystore.secure_key_password`.
+Defaults to the keystore password. This setting cannot be used with
+`ssl.keystore.secure_key_password`.

`ssl.keystore.secure_key_password` (<>)::
The password for the key in the keystore (`ssl.keystore.path`).
-Defaults to the keystore password.
-May not be used with `ssl.keystore.key_password`.
+Defaults to the keystore password. This setting cannot be used with
+`ssl.keystore.key_password`.

`ssl.truststore.path`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), the path to the
+The path to the
keystore that contains the certificates to trust.
Must be either a Java Keystore (jks) or a PKCS#12 file.
-`ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the
same time.

`ssl.truststore.type`::
-The type of the truststore (`ssl.truststore.path`). Must be one of "jks" or "PKCS12".
-Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or "pkcs12",
-otherwise uses "jks"
+The type of the truststore (`ssl.truststore.path`). Must be either `jks` or
+`PKCS12`. If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting
+defaults to `PKCS12`. Otherwise, it defaults to `jks`.

`ssl.truststore.password`::
-The password to the truststore (`ssl.truststore.path`).
-May not be used with `ssl.truststore.secure_password`.
+The password to the truststore (`ssl.truststore.path`). This setting cannot be
+used with `ssl.truststore.secure_password`.

`ssl.truststore.secure_password` (<>)::
-The password to the truststore (`ssl.truststore.path`).
-May not be used with `ssl.truststore.password`.
+The password to the truststore (`ssl.truststore.path`). This setting cannot be
+used with `ssl.truststore.password`.

`ssl.verification_mode`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), one of `full`
-(verify the hostname and the certicate path), `certificate` (verify the
+One of `full`
+(verify the hostname and the certificate path), `certificate` (verify the
certificate path, but not the hostname) or `none` (perform no verification).
Defaults to `full`.
+
@@ -989,11 +1049,10 @@ See <> for a more detailed explanation of these values.

`ssl.supported_protocols`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
-supported protocols for TLS/SSL.
+Specifies the supported protocols for TLS/SSL.

`ssl.cipher_suites`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
+Specifies the
cipher suites that should be supported.

[float]

From cdae8ff5a0760862548834a1c9c96b71e5a8ff77 Mon Sep 17 00:00:00 2001
From: jaymode
Date: Tue, 1 May 2018 13:15:59 -0600
Subject: [PATCH 59/68] Test: increase authentication logging for debugging

This commit increases the logging for authentication in the x-pack
multi-node qa test project. This is needed to assist in debugging HTTP
authorization failures while waiting for the second node in these tests.
See #30306 --- x-pack/qa/multi-node/build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle index bff9d8652b9..6051fc8acd1 100644 --- a/x-pack/qa/multi-node/build.gradle +++ b/x-pack/qa/multi-node/build.gradle @@ -12,6 +12,7 @@ integTestCluster { setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' + setting 'logger.org.elasticsearch.xpack.security.authc', 'TRACE' extraConfigFile 'roles.yml', 'roles.yml' setupCommand 'setup-test-user', 'bin/elasticsearch-users', 'useradd', 'test-user', '-p', 'x-pack-test-password', '-r', 'test' setupCommand 'setup-super-user', 'bin/elasticsearch-users', 'useradd', 'super-user', '-p', 'x-pack-super-password', '-r', 'superuser' From 62a9b8909ecc54e59643784384d44f5b4e03cf6a Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Tue, 1 May 2018 15:02:06 -0500 Subject: [PATCH 60/68] Remove RepositoriesMetaData variadic constructor (#29569) The variadic constructor was only used in a few places and the RepositoriesMetaData class is backed by a List anyway, so just using a List will make it simpler to instantiate it. --- .../cluster/metadata/RepositoriesMetaData.java | 6 +++--- .../elasticsearch/repositories/RepositoriesService.java | 7 ++++--- .../org/elasticsearch/cluster/ClusterStateDiffIT.java | 2 +- .../snapshots/RepositoriesMetaDataSerializationTests.java | 8 ++++---- .../license/LicensesMetaDataSerializationTests.java | 2 +- .../xpack/watcher/WatcherMetaDataSerializationTests.java | 2 +- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 6aa2d83fa8d..7a0b9285896 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -50,8 +50,8 @@ public class RepositoriesMetaData extends AbstractNamedDiffable implemen * * @param repositories list of repositories */ - public RepositoriesMetaData(RepositoryMetaData... 
repositories) {
-        this.repositories = Arrays.asList(repositories);
+    public RepositoriesMetaData(List<RepositoryMetaData> repositories) {
+        this.repositories = repositories;
     }

     /**
@@ -164,7 +164,7 @@ public class RepositoriesMetaData extends AbstractNamedDiffable implemen
                 throw new ElasticsearchParseException("failed to parse repositories");
             }
         }
-        return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()]));
+        return new RepositoriesMetaData(repository);
     }

     /**
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
index 577ccc78de7..636e108468e 100644
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
+++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
@@ -114,7 +114,8 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
             RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
             if (repositories == null) {
                 logger.info("put repository [{}]", request.name);
-                repositories = new RepositoriesMetaData(new RepositoryMetaData(request.name, request.type, request.settings));
+                repositories = new RepositoriesMetaData(
+                    Collections.singletonList(new RepositoryMetaData(request.name, request.type, request.settings)));
             } else {
                 boolean found = false;
                 List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1);
@@ -133,7 +134,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
                 } else {
                     logger.info("update repository [{}]", request.name);
                 }
-                repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()]));
+                repositories = new RepositoriesMetaData(repositoriesMetaData);
             }
             mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
             return ClusterState.builder(currentState).metaData(mdBuilder).build();
@@ -185,7 +186,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
                 }
             }
             if (changed) {
-                repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()]));
+                repositories = new RepositoriesMetaData(repositoriesMetaData);
                 mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
                 return ClusterState.builder(currentState).metaData(mdBuilder).build();
             }
diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
index 29e3080bfe1..a5d865a2741 100644
--- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
@@ -640,7 +640,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
             @Override
             public MetaData.Custom randomCreate(String name) {
                 if (randomBoolean()) {
-                    return new RepositoriesMetaData();
+                    return new RepositoriesMetaData(Collections.emptyList());
                 } else {
                     return IndexGraveyardTests.createRandom();
                 }
diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java
index dc0c7b55c6d..7627dafa5a9 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java
@@ -45,7 +45,7 @@ public class
RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
             entries.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()));
         }
         entries.sort(Comparator.comparing(RepositoryMetaData::name));
-        return new RepositoriesMetaData(entries.toArray(new RepositoryMetaData[entries.size()]));
+        return new RepositoriesMetaData(entries);
     }

     @Override
@@ -62,7 +62,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
         } else {
             entries.remove(randomIntBetween(0, entries.size() - 1));
         }
-        return new RepositoriesMetaData(entries.toArray(new RepositoryMetaData[entries.size()]));
+        return new RepositoriesMetaData(entries);
     }

     public Settings randomSettings() {
@@ -94,7 +94,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
                 repos.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()));
             }
         }
-        return new RepositoriesMetaData(repos.toArray(new RepositoryMetaData[repos.size()]));
+        return new RepositoriesMetaData(repos);
     }

     @Override
@@ -114,7 +114,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
         assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken());
         List<RepositoryMetaData> repos = repositoriesMetaData.repositories();
         repos.sort(Comparator.comparing(RepositoryMetaData::name));
-        return new RepositoriesMetaData(repos.toArray(new RepositoryMetaData[repos.size()]));
+        return new RepositoriesMetaData(repos);
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
index b85a3480fa7..f3ed04ed22d 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
@@ -64,7 +64,7 @@ public class LicensesMetaDataSerializationTests extends ESTestCase {
         License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2));
         LicensesMetaData licensesMetaData = new LicensesMetaData(license, Version.CURRENT);
         RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY);
-        RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData);
+        RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(Collections.singletonList(repositoryMetaData));
         final MetaData.Builder metaDataBuilder = MetaData.builder();
         if (randomBoolean()) { // random order of insertion
             metaDataBuilder.putCustom(licensesMetaData.getWriteableName(), licensesMetaData);
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java
index 50a7fec4749..0556b8535e4 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java
@@ -47,7 +47,7 @@ public class WatcherMetaDataSerializationTests extends ESTestCase {
         boolean manuallyStopped = randomBoolean();
         WatcherMetaData watcherMetaData = new WatcherMetaData(manuallyStopped);
         RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY);
-        RepositoriesMetaData repositoriesMetaData = new
RepositoriesMetaData(repositoryMetaData); + RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(Collections.singletonList(repositoryMetaData)); final MetaData.Builder metaDataBuilder = MetaData.builder(); if (randomBoolean()) { // random order of insertion metaDataBuilder.putCustom(watcherMetaData.getWriteableName(), watcherMetaData); From c5dc60718f72ba39b3f9b7ed7c973c2a948ef673 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 13:20:19 -0700 Subject: [PATCH 61/68] [DOCS] Fix 6.4-specific link in changelog (#30314) --- docs/CHANGELOG.asciidoc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index c35f9494916..0fb3bae6a6d 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -63,7 +63,7 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea [float] === Enhancements -<> ({pull}30255[#30255]) +{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255]) Added new "Request" object flavored request methods. Prefer these instead of the multi-argument versions. ({pull}29623[#29623]) @@ -74,9 +74,7 @@ multi-argument versions. ({pull}29623[#29623]) Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) -=== Regressions -=== Known Issues //[float] //=== Regressions From 00b21f886a3e7e43cf095b4a0bea951d786b189e Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Tue, 1 May 2018 13:38:22 -0700 Subject: [PATCH 62/68] Fix failure for validate API on a terms query (#29483) * WIP commit to try calling rewrite on coordinating node during TransportSearchAction * Use re-written query instead of using the original query * fix incorrect/unused imports and wildcarding * add error handling for cases where an exception is thrown * correct exception handling such that integration tests pass successfully * fix additional case covered by IndicesOptionsIntegrationIT. 
* add integration test case that verifies queries are now valid * add optional value for index * address review comments: catch superclass of XContentParseException fixes #29483 --- .../validate/query/QueryExplanation.java | 12 +++++- .../query/TransportValidateQueryAction.java | 38 ++++++++++++++++++- .../index/query/Rewriteable.java | 3 +- .../validate/SimpleValidateQueryIT.java | 19 ++++++++++ 4 files changed, 68 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index df9c12c95f4..1438daf29fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -75,7 +75,11 @@ public class QueryExplanation implements Streamable { @Override public void readFrom(StreamInput in) throws IOException { - index = in.readString(); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + index = in.readOptionalString(); + } else { + index = in.readString(); + } if (in.getVersion().onOrAfter(Version.V_5_4_0)) { shard = in.readInt(); } else { @@ -88,7 +92,11 @@ public class QueryExplanation implements Streamable { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(index); + } else { + out.writeString(index); + } if (out.getVersion().onOrAfter(Version.V_5_4_0)) { out.writeInt(shard); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 0513a37e4fe..5be321734b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -38,8 +38,11 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; @@ -54,6 +57,7 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.function.LongSupplier; public class TransportValidateQueryAction extends TransportBroadcastAction { @@ -71,7 +75,39 @@ public class TransportValidateQueryAction extends TransportBroadcastAction listener) { request.nowInMillis = System.currentTimeMillis(); - super.doExecute(task, request, listener); + LongSupplier timeProvider = () -> request.nowInMillis; + ActionListener rewriteListener = ActionListener.wrap(rewrittenQuery -> { + request.query(rewrittenQuery); + super.doExecute(task, request, listener); + }, + ex -> { + if (ex instanceof IndexNotFoundException || + ex instanceof 
IndexClosedException) { + listener.onFailure(ex); + } + List explanations = new ArrayList<>(); + explanations.add(new QueryExplanation(null, + QueryExplanation.RANDOM_SHARD, + false, + null, + ex.getMessage())); + listener.onResponse( + new ValidateQueryResponse( + false, + explanations, + // totalShards is documented as "the total shards this request ran against", + // which is 0 since the failure is happening on the coordinating node. + 0, + 0 , + 0, + null)); + }); + if (request.query() == null) { + rewriteListener.onResponse(request.query()); + } else { + Rewriteable.rewriteAndFetch(request.query(), searchService.getRewriteContext(timeProvider), + rewriteListener); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java b/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java index 492130527e8..ba8d6b84d53 100644 --- a/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java +++ b/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.query; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.ParsingException; import java.io.IOException; import java.util.ArrayList; @@ -111,7 +112,7 @@ public interface Rewriteable { } } rewriteResponse.onResponse(builder); - } catch (IOException ex) { + } catch (IOException|IllegalArgumentException|ParsingException ex) { rewriteResponse.onFailure(ex); } } diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 3c4666148d8..34501ba8a1b 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -29,6 +29,8 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -330,4 +332,21 @@ public class SimpleValidateQueryIT extends ESIntegTestCase { assertThat(response.isValid(), equalTo(true)); } } + + public void testExplainTermsQueryWithLookup() throws Exception { + client().admin().indices().prepareCreate("twitter") + .addMapping("_doc", "user", "type=integer", "followers", "type=integer") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get(); + client().prepareIndex("twitter", "_doc", "1") + .setSource("followers", new int[] {1, 2, 3}).get(); + refresh(); + + TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "_doc", "1", "followers")); + ValidateQueryResponse response = client().admin().indices().prepareValidateQuery("twitter") + .setTypes("_doc") + .setQuery(termsLookupQuery) + .setExplain(true) + .execute().actionGet(); + assertThat(response.isValid(), is(true)); + } } From 6506edfd9c62a666fef85e021a5c6cd66f08c320 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 1 May 2018 13:46:33 -0700 Subject: [PATCH 63/68] Fix a reference to match_phrase_prefix in the match query docs. 
(#30282) --- docs/reference/query-dsl/match-query.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 56874f25a85..acff4d3b036 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -186,8 +186,7 @@ process. It does not support field name prefixes, wildcard characters, or other "advanced" features. For this reason, chances of it failing are very small / non existent, and it provides an excellent behavior when it comes to just analyze and run that text as a query behavior (which is -usually what a text search box does). Also, the `phrase_prefix` type can -provide a great "as you type" behavior to automatically load search -results. +usually what a text search box does). Also, the <> +type can provide a great "as you type" behavior to automatically load search results. ************************************************** From 86addc0c8baa94f337ea02f55777756b2eddcc22 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 13:47:24 -0700 Subject: [PATCH 64/68] [DOCS] Adds PKI realm configuration details (#30225) --- .../configuring-pki-realm.asciidoc | 176 ++++++++++++++++++ .../authentication/pki-realm.asciidoc | 170 +---------------- .../docs/en/security/configuring-es.asciidoc | 2 + 3 files changed, 182 insertions(+), 166 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc new file mode 100644 index 00000000000..f66a82b0664 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -0,0 +1,176 @@ +[role="xpack"] +[[configuring-pki-realm]] +=== Configuring a PKI realm + +You can configure {security} to use Public Key Infrastructure (PKI) certificates +to authenticate users in {es}. This requires clients to present X.509 +certificates. + +NOTE: You cannot use PKI certificates to authenticate users in {kib}. + +To use PKI in {es}, you configure a PKI realm, enable client authentication on +the desired network layers (transport or http), and map the Distinguished Names +(DNs) from the user certificates to {security} roles in the role mapping file. + +You can also use a combination of PKI and username/password authentication. For +example, you can enable SSL/TLS on the transport layer and define a PKI realm to +require transport clients to authenticate with X.509 certificates, while still +authenticating HTTP traffic using username and password credentials. You can +also set `xpack.security.transport.ssl.client_authentication` to `optional` to +allow clients without certificates to authenticate with other credentials. + +IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI. + +For more information, see {xpack-ref}/pki-realm.html[PKI User Authentication]. + +. Add a realm configuration of type `pki` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `pki`. If you are configuring multiple realms, you should also +explicitly set the `order` attribute. See <> for all of the +options you can set for a `pki` realm. 
++
+--
+For example, the following snippet shows the most basic `pki` realm configuration:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+------------------------------------------------------------
+
+With this configuration, any certificate trusted by the SSL/TLS layer is accepted
+for authentication. The username is the common name (CN) extracted from the DN
+of the certificate.
+
+IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
+realms you specify are used for authentication. If you also want to use the
+`native` or `file` realms, you must include them in the realm chain.
+
+If you want to use something other than the CN of the DN as the username, you
+can specify a regex to extract the desired username. For example, the regex in
+the following configuration extracts the email address from the DN:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          username_pattern: "EMAILADDRESS=(.*?)(?:,|$)"
+------------------------------------------------------------
+--
+
+. Restart {es}.
+
+. <>.
+
+. Enable client authentication on the desired network layers (transport or http).
++
+--
+//TBD: This step might need to be split into a separate topic with additional details
+//about setting up client authentication.
+The PKI realm relies on the TLS settings of the node's network interface. The
+realm can be configured to be more restrictive than the underlying network
+connection - that is, it is possible to configure the node such that some
+connections are accepted by the network interface but then fail to be
+authenticated by the PKI realm. However, the reverse is not possible. The PKI
+realm cannot authenticate a connection that has been refused by the network
+interface.
+
+In particular this means:
+
+* The transport or http interface must request client certificates by setting
+  `client_authentication` to `optional` or `required`.
+* The interface must _trust_ the certificate that is presented by the client
+  by configuring either the `truststore` or `certificate_authorities` paths,
+  or by setting `verification_mode` to `none`. See
+  <> for an explanation of this
+  setting.
+* The _protocols_ supported by the interface must be compatible with those
+  used by the client.
+
+The relevant network interface (transport or http) must be configured to trust
+any certificate that is to be used within the PKI realm. However, it is possible
+to configure the PKI realm to trust only a _subset_ of the certificates accepted
+by the network interface. This is useful when the SSL/TLS layer trusts clients
+with certificates that are signed by a different CA than the one that signs your
+users' certificates.
+
+To configure the PKI realm with its own truststore, specify the `truststore.path`
+option. For example:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          truststore:
+            path: "/path/to/pki_truststore.jks"
+            password: "x-pack-test-password"
+------------------------------------------------------------
+
+The `certificate_authorities` option can be used as an alternative to the
+`truststore.path` setting.
+--
+
+. Map roles for PKI users.
++
+--
+You map roles for PKI users through the
+<> or by using a file stored on
+each node. When a user authenticates against a PKI realm, the privileges for
+that user are the union of all privileges defined by the roles to which the
+user is mapped.
+
+You identify a user by the distinguished name in their certificate.
+For example, the following mapping configuration maps `John Doe` to the
+`user` role:
+
+Using the role-mapping API:
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/users
+{
+  "roles" : [ "user" ],
+  "rules" : { "field" : {
+    "dn" : "cn=John Doe,ou=example,o=com" <1>
+  } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The distinguished name (DN) of a PKI user.
+
+Or, alternatively, configured in a role-mapping file:
+[source, yaml]
+------------------------------------------------------------
+user: <1>
+  - "cn=John Doe,ou=example,o=com" <2>
+------------------------------------------------------------
+<1> The name of a role.
+<2> The distinguished name (DN) of a PKI user.
+
+The distinguished name for a PKI user follows X.500 naming conventions which
+place the most specific fields (like `cn` or `uid`) at the beginning of the
+name, and the most general fields (like `o` or `dc`) at the end of the name.
+Some tools, such as _openssl_, may print out the subject name in a different
+format.
+
+One way that you can determine the correct DN for a certificate is to use the
+<> (use the relevant PKI
+certificate as the means of authentication) and inspect the metadata field in
+the result. The user's distinguished name will be populated under the `pki_dn`
+key. You can also use the authenticate API to validate your role mapping.
+
+For more information, see
+{xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles].
+--
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
index 2971f6e3887..47f9670d840 100644
--- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
@@ -1,5 +1,5 @@
 [[pki-realm]]
-=== PKI User Authentication
+=== PKI user authentication
 
 You can configure {security} to use Public Key Infrastructure (PKI) certificates
 to authenticate users in {es}. This requires clients to present X.509
@@ -12,171 +12,9 @@ the desired network layers (transport or http), and map the Distinguished Names
 (DNs) from the user certificates to {security} roles in the
 <>.
 
-You can also use a combination of PKI and username/password authentication. For
-example, you can enable SSL/TLS on the transport layer and define a PKI realm to
-require transport clients to authenticate with X.509 certificates, while still
-authenticating HTTP traffic using username and password credentials. You can also set
-`xpack.security.transport.ssl.client_authentication` to `optional` to allow clients without
-certificates to authenticate with other credentials.
-
-IMPORTANT: You must enable SSL/TLS and enabled client authentication to use PKI.
- For more information, see <>.
-
-==== PKI Realm Configuration
-
-Like other realms, you configure options for a `pki` realm under the
-`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
-
-To configure a `pki` realm:
-
-. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
-`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
-`pki`.
If you are configuring multiple realms, you should also explicitly set -the `order` attribute. See <> for all of the options you can set -for a `pki` realm. -+ -For example, the following snippet shows the most basic `pki` realm configuration: -+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - pki1: - type: pki ------------------------------------------------------------- -+ -With this configuration, any certificate trusted by the SSL/TLS layer is accepted -for authentication. The username is the common name (CN) extracted from the DN -of the certificate. -+ -IMPORTANT: When you configure realms in `elasticsearch.yml`, only the -realms you specify are used for authentication. If you also want to use the -`native` or `file` realms, you must include them in the realm chain. -+ -If you want to use something other than the CN of the DN as the username, you -can specify a regex to extract the desired username. For example, the regex in -the following configuration extracts the email address from the DN: -+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - pki1: - type: pki - username_pattern: "EMAILADDRESS=(.*?)(?:,|$)" ------------------------------------------------------------- -+ -. Restart Elasticsearch. - -[[pki-ssl-config]] -==== PKI and SSL Settings - -The PKI realm relies on the SSL settings of the node's network interface -(transport or http). The realm can be configured to be more restrictive than -the underlying network connection - that is, it is possible to configure the -node such that some connections are accepted by the network interface but then -fail to be authenticated by the PKI realm. However the reverse is not possible -- the PKI realm cannot authenticate a connection that has been refused by the -network interface. - -In particular this means: - -* The transport or http interface must request client certificates by setting - `client_authentication` to `optional` or `required`. -* The interface must _trust_ the certificate that is presented by the client - by configuring either the `truststore` or `certificate_authorities` paths, - or by setting `verification_mode` to `none`. -+ -See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`] -for an explanation of this setting. - -* The _protocols_ supported by the interface must be compatible with those - used by the client. - - -The relevant network interface (transport or http) must be configured to trust -any certificate that is to be used within the PKI realm. However it possible to -configure the PKI realm to trust only a _subset_ of the certificates accepted -by the network interface. -This is useful when the SSL/TLS layer trusts clients with certificates that are -signed by a different CA than the one that signs your users' certificates. - -To configure the PKI realm with its own truststore, specify the -`truststore.path` option as below: - -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - pki1: - type: pki - truststore: - path: "/path/to/pki_truststore.jks" - password: "x-pack-test-password" ------------------------------------------------------------- - -The `certificate_authorities` option may be used as an alternative to the -`truststore.path` setting. - +See {ref}/configuring-pki-realm.html[Configuring a PKI realm]. 
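Neither version of the page shows the `certificate_authorities` alternative that the paragraph above mentions, so a minimal sketch may help; the PEM file path below is a placeholder, not a value from the source:

[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        pki1:
          type: pki
          # Placeholder path: one or more PEM-encoded CA files the realm
          # trusts, used instead of pointing it at a JKS truststore.
          certificate_authorities: [ "/path/to/ca.crt" ]
------------------------------------------------------------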
[[pki-settings]] -===== PKI Realm Settings +==== PKI Realm Settings -See {ref}/security-settings.html#ref-pki-settings[PKI Realm Settings]. - -[[assigning-roles-pki]] -==== Mapping Roles for PKI Users - -You map roles for PKI users through the -{ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored on -each node. When a user authenticates against a PKI realm, the privileges for -that user are the union of all privileges defined by the roles to which the -user is mapped. - -You identify a user by the distinguished name in their certificate. -For example, the following mapping configuration maps `John Doe` to the -`user` role: - -Using the role-mapping API: -[source,js] --------------------------------------------------- -PUT _xpack/security/role_mapping/users -{ - "roles" : [ "user" ], - "rules" : { "field" : { - "dn" : "cn=John Doe,ou=example,o=com" <1> - } }, - "enabled": true -} --------------------------------------------------- -// CONSOLE -<1> The distinguished name (DN) of a PKI user. - -Or, alternatively, configured in a role-mapping file: -[source, yaml] ------------------------------------------------------------- -user: <1> - - "cn=John Doe,ou=example,o=com" <2> ------------------------------------------------------------- -<1> The name of a role. -<2> The distinguished name (DN) of a PKI user. - -The disinguished name for a PKI user follows X.500 naming conventions which -place the most specific fields (like `cn` or `uid`) at the beginning of the -name, and the most general fields (like `o` or `dc`) at the end of the name. -Some tools, such as _openssl_, may print out the subject name in a different - format. - -One way that you can determine the correct DN for a certificate is to use the -{ref}/security-api-authenticate.html[authenticate API] (use the relevant PKI -certificate as the means of authentication) and inspect the metadata field in -the result. The user's distinguished name will be populated under the `pki_dn` -key. You can also use the authenticate API to validate your role mapping. - -For more information, see <>. +See {ref}/security-settings.html#ref-pki-settings[PKI realm settings]. diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 3efc682216f..81e9007d750 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -72,6 +72,7 @@ user API. . Choose which types of realms you want to use to authenticate users. ** <>. +** <>. . Set up roles and users to control access to {es}. 
For example, to grant _John Doe_ full access to all indices that match @@ -132,5 +133,6 @@ include::securing-communications/configuring-tls-docker.asciidoc[] include::securing-communications/enabling-cipher-suites.asciidoc[] include::securing-communications/separating-node-client-traffic.asciidoc[] include::authentication/configuring-active-directory-realm.asciidoc[] +include::authentication/configuring-pki-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] From db44a4cddbfc1aa72bdc9210af9b6d25d2b62a68 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 14:11:38 -0700 Subject: [PATCH 65/68] [DOCS] Adds file realm configuration details (#30221) --- .../configuring-file-realm.asciidoc | 106 ++++++++++++++++++ .../authentication/file-realm.asciidoc | 105 +---------------- .../docs/en/security/configuring-es.asciidoc | 4 +- 3 files changed, 114 insertions(+), 101 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc new file mode 100644 index 00000000000..8555902e503 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc @@ -0,0 +1,106 @@ +[role="xpack"] +[[configuring-file-realm]] +=== Configuring a file realm + +You can manage and authenticate users with the built-in `file` internal realm. +All the data about the users for the `file` realm is stored in two files on each +node in the cluster: `users` and `users_roles`. Both files are located in +`CONFIG_DIR/` and are read on startup. + +[IMPORTANT] +============================== +The `users` and `users_roles` files are managed locally by the node and are +**not** managed globally by the cluster. This means that with a typical +multi-node cluster, the exact same changes need to be applied on each and every +node in the cluster. + +A safer approach would be to apply the change on one of the nodes and have the +files distributed or copied to all other nodes in the cluster (either manually +or using a configuration management system such as Puppet or Chef). +============================== + +The `file` realm is added to the realm chain by default. You don't need to +explicitly configure a `file` realm. + +For more information about file realms, see +{xpack-ref}/file-realm.html[File-based user authentication]. + +. (Optional) Add a realm configuration of type `file` to `elasticsearch.yml` +under the `xpack.security.authc.realms` namespace. At a minimum, you must set +the realm `type` to `file`. If you are configuring multiple realms, you should +also explicitly set the `order` attribute. ++ +-- +//See <> for all of the options you can set for a `file` realm. + +For example, the following snippet shows a `file` realm configuration that sets +the `order` to zero so the realm is checked first: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + file1: + type: file + order: 0 +------------------------------------------------------------ +-- + +. Restart {es}. + +. Add user information to the `CONFIG_DIR/users` file on each node in the +cluster. ++ +-- +The `users` file stores all the users and their passwords. Each line in the file +represents a single user entry consisting of the username and **hashed** password. 
+
+[source,bash]
+----------------------------------------------------------------------
+rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W
+alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS
+jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni
+----------------------------------------------------------------------
+
+{security} uses `bcrypt` to hash the user passwords.
+
+While it is possible to modify these files directly using any standard text
+editor, we strongly recommend using the <> tool to apply the
+required changes.
+
+IMPORTANT: As the administrator of the cluster, it is your responsibility to
+ ensure the same users are defined on every node in the cluster.
+ {security} does not provide any mechanism to guarantee this.
+
+--
+
+. Add role information to the `CONFIG_DIR/users_roles` file on each node
+in the cluster.
++
+--
+The `users_roles` file stores the roles associated with the users. For example:
+
+[source,shell]
+--------------------------------------------------
+admin:rdeniro
+power_user:alpacino,jacknich
+user:jacknich
+--------------------------------------------------
+
+Each row maps a role to a comma-separated list of all the users that are
+associated with that role.
+
+You can use the <> tool to update this file. You must ensure that
+the same changes are made on every node in the cluster.
+--
+
+. (Optional) Change how often the `users` and `users_roles` files are checked.
++
+--
+By default, {security} checks these files for changes every 5 seconds. You can
+change this default behavior by changing the `resource.reload.interval.high`
+setting in the `elasticsearch.yml` file (as this is a common setting in {es},
+changing its value may affect other schedules in the system).
+--
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc
index 000e204f67f..937537ac1a1 100644
--- a/x-pack/docs/en/security/authentication/file-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc
@@ -1,8 +1,8 @@
 [[file-realm]]
 === File-based User Authentication
 
-You can manage and authenticate users with the built-in `file` internal realm.
-With the `file` realm users are defined in local files on each node in the cluster.
+You can manage and authenticate users with the built-in `file` realm.
+With the `file` realm, users are defined in local files on each node in the cluster.
 
 IMPORTANT: As the administrator of the cluster, it is your responsibility to
            ensure the same users are defined on every node in the cluster.
@@ -20,102 +20,7 @@ realms you specify are used for authentication. To use the
 To define users, {security} provides the {ref}/users-command.html[users]
 command-line tool. This tool enables you to add and remove users, assign user
-roles and manage user passwords.
+roles, and manage user passwords.
 
-==== Configuring a File Realm
-
-The `file` realm is added to the realm chain by default. You don't need to
-explicitly configure a `file` realm to manage users with the `users` tool.
-
-Like other realms, you can configure options for a `file` realm in the
-`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
-
-To configure an `file` realm:
-
-. Add a realm configuration of type `file` to `elasticsearch.yml` under the
-`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
-`file`.
If you are configuring multiple realms, you should also explicitly set -the `order` attribute. See <> for all of the options you can set -for a `file` realm. -+ -For example, the following snippet shows a `file` realm configuration that sets -the `order` to zero so the realm is checked first: -+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - file1: - type: file - order: 0 ------------------------------------------------------------- - -. Restart Elasticsearch. - -[[file-realm-settings]] -===== File Realm Settings - -See {ref}/security-settings.html#ref-users-settings[File Realm Settings]. - -==== A Look Under the Hood - -All the data about the users for the `file` realm is stored in two files, `users` -and `users_roles`. Both files are located in `CONFIG_DIR/x-pack/` and are read -on startup. - -By default, {security} checks these files for changes every 5 seconds. You can -change this default behavior by changing the `resource.reload.interval.high` setting in -the `elasticsearch.yml` file (as this is a common setting in Elasticsearch, -changing its value may effect other schedules in the system). - -[IMPORTANT] -============================== -These files are managed locally by the node and are **not** managed -globally by the cluster. This means that with a typical multi-node cluster, -the exact same changes need to be applied on each and every node in the -cluster. - -A safer approach would be to apply the change on one of the nodes and have the -`users` and `users_roles` files distributed/copied to all other nodes in the -cluster (either manually or using a configuration management system such as -Puppet or Chef). -============================== - -While it is possible to modify these files directly using any standard text -editor, we strongly recommend using the {ref}/users-command.html[`bin/elasticsearch-users`] -command-line tool to apply the required changes. - -[float] -[[users-file]] -===== The `users` File -The `users` file stores all the users and their passwords. Each line in the -`users` file represents a single user entry consisting of the username and -**hashed** password. - -[source,bash] ----------------------------------------------------------------------- -rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W -alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS -jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni ----------------------------------------------------------------------- - -NOTE: {security} uses `bcrypt` to hash the user passwords. - -[float] -[[users_defining-roles]] -==== The `users_roles` File - -The `users_roles` file stores the roles associated with the users, as in the -following example: - -[source,shell] --------------------------------------------------- -admin:rdeniro -power_user:alpacino,jacknich -user:jacknich --------------------------------------------------- - -Each row maps a role to a comma-separated list of all the users that are -associated with that role. +For more information, see +{ref}/configuring-file-realm.html[Configuring a file realm]. 
\ No newline at end of file diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 81e9007d750..114fd1cdc4f 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[configuring-security]] -== Configuring Security in {es} +== Configuring security in {es} ++++ Configuring Security ++++ @@ -72,6 +72,7 @@ user API. . Choose which types of realms you want to use to authenticate users. ** <>. +** <>. ** <>. . Set up roles and users to control access to {es}. @@ -133,6 +134,7 @@ include::securing-communications/configuring-tls-docker.asciidoc[] include::securing-communications/enabling-cipher-suites.asciidoc[] include::securing-communications/separating-node-client-traffic.asciidoc[] include::authentication/configuring-active-directory-realm.asciidoc[] +include::authentication/configuring-file-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] From 6a55eead94a82e6d46cd09853571d87a4e980bdf Mon Sep 17 00:00:00 2001 From: lcawley Date: Tue, 1 May 2018 14:42:29 -0700 Subject: [PATCH 66/68] [DOCS] Removes broken link --- x-pack/docs/en/settings/security-settings.asciidoc | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc index 963d5a5f002..38ceda7d07f 100644 --- a/x-pack/docs/en/settings/security-settings.asciidoc +++ b/x-pack/docs/en/settings/security-settings.asciidoc @@ -701,9 +701,7 @@ Defaults to `CN=(.*?)(?:,\|$)`. `certificate_authorities`:: List of paths to the PEM certificate files that should be used to authenticate a user's certificate as trusted. Defaults to the trusted certificates configured -for SSL. See the {xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] -section of the PKI realm documentation for more information. -This setting cannot be used with `truststore.path`. +for SSL. This setting cannot be used with `truststore.path`. `truststore.algorithm`:: Algorithm for the truststore. Defaults to `SunX509`. @@ -716,10 +714,7 @@ The password for the truststore. `truststore.path`:: The path of a truststore to use. Defaults to the trusted certificates configured -for SSL. See the -{xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] section of the PKI realm -documentation for more information. This setting cannot be used with -`certificate_authorities`. +for SSL. This setting cannot be used with `certificate_authorities`. `files.role_mapping`:: Specifies the {xpack-ref}/security-files.html[location] of the From 5c9f08402e54819d64702d9713c5b75bb5bba064 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 1 May 2018 15:16:28 -0700 Subject: [PATCH 67/68] Correct an example in the top-level suggester documentation. 
(#30224) --- docs/reference/search/suggesters.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/suggesters.asciidoc b/docs/reference/search/suggesters.asciidoc index 2cfd2a4f04a..248992a12e2 100644 --- a/docs/reference/search/suggesters.asciidoc +++ b/docs/reference/search/suggesters.asciidoc @@ -23,7 +23,7 @@ POST twitter/_search }, "suggest" : { "my-suggestion" : { - "text" : "trying out Elasticsearch", + "text" : "tring out Elasticsearch", "term" : { "field" : "message" } From 092dd6cb893210a10360fa19f527734f3b3e6c56 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 1 May 2018 16:05:23 -0700 Subject: [PATCH 68/68] [DOCS] Removes X-Pack Elasticsearch release notes (#30272) --- docs/CHANGELOG.asciidoc | 14 +++++++++++ x-pack/docs/en/index.asciidoc | 3 --- .../en/release-notes/7.0.0-alpha1.asciidoc | 25 ------------------- .../docs/en/release-notes/xpack-xes.asciidoc | 20 --------------- 4 files changed, 14 insertions(+), 48 deletions(-) delete mode 100644 x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc delete mode 100644 x-pack/docs/en/release-notes/xpack-xes.asciidoc diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 0fb3bae6a6d..20a792d0f78 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -30,6 +30,20 @@ This section summarizes the changes in each release. <> ({pull}30185[#30185]) +Machine Learning:: +* The `max_running_jobs` node property is removed in this release. Use the +`xpack.ml.max_open_jobs` setting instead. For more information, see <>. + +Monitoring:: +* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` +to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` +and set it to `false` (its default), which was added in 6.3.0. + +Security:: +* The fields returned as part of the mappings section by get index, get +mappings, get field mappings, and field capabilities API are now only the +ones that the user is authorized to access in case field level security is enabled. + //[float] //=== Breaking Java Changes diff --git a/x-pack/docs/en/index.asciidoc b/x-pack/docs/en/index.asciidoc index 595c78ddcf5..bf884cf3324 100644 --- a/x-pack/docs/en/index.asciidoc +++ b/x-pack/docs/en/index.asciidoc @@ -31,8 +31,5 @@ include::commands/index.asciidoc[] :edit_url: include::{es-repo-dir}/index-shared4.asciidoc[] -:edit_url!: -include::release-notes/xpack-xes.asciidoc[] - :edit_url: include::{es-repo-dir}/index-shared5.asciidoc[] diff --git a/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc b/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc deleted file mode 100644 index 2c264f48e33..00000000000 --- a/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -[[xes-7.0.0-alpha1]] -== {es} {xpack} 7.0.0-alpha1 Release Notes - -[float] -[[xes-breaking-7.0.0-alpha1]] -=== Breaking Changes - -Machine Learning:: -* The `max_running_jobs` node property is removed in this release. Use the -`xpack.ml.max_open_jobs` setting instead. For more information, see <>. - -Monitoring:: -* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` -to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` -and set it to `false` (its default), which was added in 6.3.0. 
- -Security:: -* The fields returned as part of the mappings section by get index, get -mappings, get field mappings and field capabilities API are now only the -ones that the user is authorized to access in case field level security is enabled. - -See also: - -* {kibana-ref}/xkb-7.0.0-alpha1.html[{kib} {xpack} 7.0.0-alpha1 Release Notes] -* {logstash-ref}/xls-7.0.0-alpha1.html[Logstash {xpack} 7.0.0-alpha1 Release Notes] diff --git a/x-pack/docs/en/release-notes/xpack-xes.asciidoc b/x-pack/docs/en/release-notes/xpack-xes.asciidoc deleted file mode 100644 index e58ef209ecd..00000000000 --- a/x-pack/docs/en/release-notes/xpack-xes.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -[role="xpack"] -[[release-notes-xes]] -= {xpack} Release Notes - -[partintro] --- -This section summarizes the changes in each release for all of the {xpack} -components in {es}. - -* <> - -See also: - -* <> -* {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes] -* {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes] - --- - -include::7.0.0-alpha1.asciidoc[]
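A closing note on the monitoring entry that the final patch moves into the changelog: the settings swap is easy to sketch. The fragment below is a minimal, illustrative `elasticsearch.yml` snippet; both setting names come from the entry itself, and the values shown are assumptions for demonstration:

[source, yaml]
------------------------------------------------------------
# No longer accepted: disabling collection through a sentinel interval.
#xpack.monitoring.collection.interval: -1

# 6.3.0 and later: use the dedicated flag (false is the default).
xpack.monitoring.collection.enabled: false
------------------------------------------------------------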