From 8af930c468b72c4e86b6594f12d5e6ee6b8200b0 Mon Sep 17 00:00:00 2001
From: Ignacio Vera <iverase@gmail.com>
Date: Fri, 12 Apr 2019 09:16:33 +0200
Subject: [PATCH 001/112] Improve error message when a polygon contains the
 same point twice in non-consecutive positions (#41051) (#41133)

When a polygon contains a self-intersection because the same point appears twice in non-consecutive positions, the polygon builder tries to split the polygon. During the split, one of the resulting polygons becomes invalid because it is not closed, and the error that is thrown is unrelated to the real issue.

We now detect this situation and throw a more meaningful error.
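
For illustration, a minimal sketch (mirroring the test added in this patch) of a polygon that repeats the point (1, 1.9) in non-consecutive positions and now fails with the clearer message:

    // Hypothetical reproduction using the same builders as ShapeBuilderTests.
    PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder()
        .coordinate(0, 0)
        .coordinate(0, 2)
        .coordinate(1, 1.9)    // first occurrence of the point
        .coordinate(0.5, 1.8)
        .coordinate(1.5, 1.8)
        .coordinate(1, 1.9)    // same point again, non-consecutive
        .coordinate(2, 2)
        .coordinate(2, 0)
        .coordinate(0, 0));
    // Previously failed with an unrelated "not closed" style error; now throws
    // InvalidShapeException: "Self-intersection at or near point [...]"
    builder.close().buildS4J();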
---
 .../common/geo/builders/PolygonBuilder.java   | 27 ++++++++++++++-----
 .../common/geo/ShapeBuilderTests.java         | 18 +++++++++++++
 2 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
index e4751de04bf..97503efc033 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -406,7 +406,7 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
      * @param edges a list of edges to which all edges of the component will be added (could be <code>null</code>)
      * @return number of edges that belong to this component
      */
-    private static int component(final Edge edge, final int id, final ArrayList<Edge> edges) {
+    private static int component(final Edge edge, final int id, final ArrayList<Edge> edges, double[] partitionPoint) {
         // find a coordinate that is not part of the dateline
         Edge any = edge;
         while(any.coordinate.x == +DATELINE || any.coordinate.x == -DATELINE) {
@@ -438,6 +438,9 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
             if (edges != null) {
                 // found a closed loop - we have two connected components so we need to slice into two distinct components
                 if (visitedEdge.containsKey(current.coordinate)) {
+                    partitionPoint[0] = current.coordinate.x;
+                    partitionPoint[1] = current.coordinate.y;
+                    partitionPoint[2] = current.coordinate.z;
                     if (connectedComponents > 0 && current.next != edge) {
                         throw new InvalidShapeException("Shape contains more than one shared point");
                     }
@@ -479,10 +482,20 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
      * @param coordinates Array of coordinates to write the result to
      * @return the coordinates parameter
      */
-    private static Coordinate[] coordinates(Edge component, Coordinate[] coordinates) {
+    private static Coordinate[] coordinates(Edge component, Coordinate[] coordinates, double[] partitionPoint) {
         for (int i = 0; i < coordinates.length; i++) {
             coordinates[i] = (component = component.next).coordinate;
         }
+        // First and last coordinates must be equal
+        if (coordinates[0].equals(coordinates[coordinates.length - 1]) == false) {
+            if (partitionPoint[2] == Double.NaN) {
+                throw new InvalidShapeException("Self-intersection at or near point ["
+                    + partitionPoint[0] + "," + partitionPoint[1] + "]");
+            } else {
+                throw new InvalidShapeException("Self-intersection at or near point ["
+                    + partitionPoint[0] + "," + partitionPoint[1] + "," + partitionPoint[2] + "]");
+            }
+        }
         return coordinates;
     }
 
@@ -512,8 +525,9 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
         final Coordinate[][] points = new Coordinate[numHoles][];
 
         for (int i = 0; i < numHoles; i++) {
-            int length = component(holes[i], -(i+1), null); // mark as visited by inverting the sign
-            points[i] = coordinates(holes[i], new Coordinate[length+1]);
+            double[]  partitionPoint = new double[3];
+            int length = component(holes[i], -(i+1), null, partitionPoint); // mark as visited by inverting the sign
+            points[i] = coordinates(holes[i], new Coordinate[length+1], partitionPoint);
         }
 
         return points;
@@ -524,9 +538,10 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
 
         for (int i = 0; i < edges.length; i++) {
             if (edges[i].component >= 0) {
-                int length = component(edges[i], -(components.size()+numHoles+1), mainEdges);
+                double[]  partitionPoint = new double[3];
+                int length = component(edges[i], -(components.size()+numHoles+1), mainEdges, partitionPoint);
                 List<Coordinate[]> component = new ArrayList<>();
-                component.add(coordinates(edges[i], new Coordinate[length+1]));
+                component.add(coordinates(edges[i], new Coordinate[length+1], partitionPoint));
                 components.add(component);
             }
         }
diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
index 32f1b333c4e..3c653db2d15 100644
--- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
@@ -759,4 +759,22 @@ public class ShapeBuilderTests extends ESTestCase {
 
         assertEquals(expected, pb.toString());
     }
+
+    public void testInvalidSelfCrossingPolygon() {
+        PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder()
+            .coordinate(0, 0)
+            .coordinate(0, 2)
+            .coordinate(1, 1.9)
+            .coordinate(0.5, 1.8)
+            .coordinate(1.5, 1.8)
+            .coordinate(1, 1.9)
+            .coordinate(2, 2)
+            .coordinate(2, 0)
+            .coordinate(0, 0)
+        );
+        Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J());
+        assertThat(e.getMessage(), containsString("Self-intersection at or near point ["));
+        e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry());
+        assertThat(e.getMessage(), containsString("Self-intersection at or near point ["));
+    }
 }

From bee892006a5271c09f84ae1d606844e25cf9ad1d Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Fri, 12 Apr 2019 12:31:30 +0300
Subject: [PATCH 002/112] Use the built image in docker tests (#40314)

Instead of allowing docker-compose to rebuild it.
With this change we tag the image with a test label and use that label
in the tests, as this is simpler than dealing with a dynamically
generated docker-compose file.
---
 distribution/docker/build.gradle       | 13 +++++++------
 distribution/docker/docker-compose.yml |  8 ++------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 03556edb7c5..07e8926b2a3 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -90,10 +90,7 @@ void addCopyDockerfileTask(final boolean oss) {
 }
 
 preProcessFixture {
-  dependsOn taskName("copy", true, "DockerContext")
-  dependsOn taskName("copy", true, "Dockerfile")
-  dependsOn taskName("copy", false, "DockerContext")
-  dependsOn taskName("copy", false, "Dockerfile")
+  dependsOn assemble
 }
 
 postProcessFixture.doLast {
@@ -110,12 +107,16 @@ void addBuildDockerImage(final boolean oss) {
     dependsOn taskName("copy", oss, "Dockerfile")
     List<String> tags
     if (oss) {
-      tags = [ "docker.elastic.co/elasticsearch/elasticsearch-oss:${VersionProperties.elasticsearch}" ]
+      tags = [ 
+        "docker.elastic.co/elasticsearch/elasticsearch-oss:${VersionProperties.elasticsearch}",
+        "elasticsearch-oss:test" 
+      ]
     } else {
       tags = [
         "elasticsearch:${VersionProperties.elasticsearch}",
         "docker.elastic.co/elasticsearch/elasticsearch:${VersionProperties.elasticsearch}",
-        "docker.elastic.co/elasticsearch/elasticsearch-full:${VersionProperties.elasticsearch}"
+        "docker.elastic.co/elasticsearch/elasticsearch-full:${VersionProperties.elasticsearch}",
+        "elasticsearch:test",
       ]
     }
     executable 'docker'
diff --git a/distribution/docker/docker-compose.yml b/distribution/docker/docker-compose.yml
index 3f220aa9e91..3207afd501a 100644
--- a/distribution/docker/docker-compose.yml
+++ b/distribution/docker/docker-compose.yml
@@ -2,18 +2,14 @@
 version: '3'
 services:
   elasticsearch-default:
-    build:
-      context: ./build/docker
-      dockerfile: Dockerfile
+    image: elasticsearch:test
     environment:  
        - cluster.name=elasticsearch-default
        - discovery.type=single-node
     ports:
       - "9200"
   elasticsearch-oss:
-    build:
-      context: ./build/oss-docker
-      dockerfile: Dockerfile
+    image: elasticsearch-oss:test
     environment:  
        - cluster.name=elasticsearch-oss
        - discovery.type=single-node

From ff64314b2b976aad411dede92ec8ffa247e5d5de Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Fri, 12 Apr 2019 12:38:29 +0300
Subject: [PATCH 003/112] Simplify testclusters, don't allow cross project
 clusters (#40972)

* Simplify testclusters, don't allow cross project clusters
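
A hedged sketch of what the cross-project restriction means for build scripts (project, task, and cluster names below are illustrative, not taken from this change):

    // build.gradle of :moduleA
    testClusters {
      local { /* cluster configuration, e.g. version and distribution */ }
    }

    task integTest {
      useCluster testClusters.local   // OK: cluster is defined in this project
      // Referencing a cluster defined in another project, e.g.
      //   useCluster project(':moduleB').testClusters.remote
      // now fails with TestClustersException
      // ("Task ... can't use test cluster from another project ...").
    }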
---
 .../testclusters/TestClustersPlugin.java      | 196 +++++++++---------
 1 file changed, 93 insertions(+), 103 deletions(-)

diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
index efbf061fefa..e9586f4c4ba 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
@@ -42,11 +42,11 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -56,19 +56,18 @@ public class TestClustersPlugin implements Plugin<Project> {
 
     private static final String LIST_TASK_NAME = "listTestClusters";
     private static final String NODE_EXTENSION_NAME = "testClusters";
-    static final String HELPER_CONFIGURATION_NAME = "testclusters";
+    private static final String HELPER_CONFIGURATION_NAME = "testclusters";
     private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts";
     private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
     private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
 
     private static final Logger logger =  Logging.getLogger(TestClustersPlugin.class);
 
-    // this is static because we need a single mapping across multi project builds, as some of the listeners we use,
-    // like task graph are singletons across multi project builds.
-    private static final Map<Task, List<ElasticsearchCluster>> usedClusters = new ConcurrentHashMap<>();
-    private static final Map<ElasticsearchCluster, Integer> claimsInventory = new ConcurrentHashMap<>();
-    private static final Set<ElasticsearchCluster> runningClusters = Collections.synchronizedSet(new HashSet<>());
-    private static volatile  ExecutorService executorService;
+    private final Map<Task, List<ElasticsearchCluster>> usedClusters = new HashMap<>();
+    private final Map<ElasticsearchCluster, Integer> claimsInventory = new HashMap<>();
+    private final Set<ElasticsearchCluster> runningClusters =new HashSet<>();
+    private final Thread shutdownHook = new Thread(this::shutDownAllClusters);
+    private ExecutorService executorService = Executors.newSingleThreadExecutor();
 
     @Override
     public void apply(Project project) {
@@ -81,10 +80,8 @@ public class TestClustersPlugin implements Plugin<Project> {
         createListClustersTask(project, container);
 
         // create DSL for tasks to mark clusters these use
-        createUseClusterTaskExtension(project);
+        createUseClusterTaskExtension(project, container);
 
-        // There's a single Gradle instance for multi project builds, this means that some configuration needs to be
-        // done only once even if the plugin is applied multiple times as a part of multi project build
         if (rootProject.getConfigurations().findByName(HELPER_CONFIGURATION_NAME) == null) {
             // We use a single configuration on the root project to resolve all testcluster dependencies ( like distros )
             // at once, only once without the need to repeat it for each project. This pays off assuming that most
@@ -95,18 +92,14 @@ public class TestClustersPlugin implements Plugin<Project> {
                     "ES distributions and plugins."
             );
 
-            // When running in the Daemon it's possible for this to hold references to past
-            usedClusters.clear();
-            claimsInventory.clear();
-            runningClusters.clear();
-
             // We have a single task to sync the helper configuration to "artifacts dir"
             // the clusters will look for artifacts there based on the naming conventions.
             // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in
             // the build.
             rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, sync -> {
                 sync.getInputs().files((Callable<FileCollection>) helperConfiguration::getAsFileTree);
-                sync.getOutputs().dir(getTestClustersConfigurationExtractDir(project));
+                sync.getOutputs().dir(new File(project.getRootProject().getBuildDir(), "testclusters/extract"));
+                // NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
                 sync.doLast(new Action<Task>() {
                     @Override
                     public void execute(Task task) {
@@ -121,33 +114,33 @@ public class TestClustersPlugin implements Plugin<Project> {
                                 } else {
                                     throw new IllegalArgumentException("Can't extract " + file + " unknown file extension");
                                 }
-                                spec.from(files).into(getTestClustersConfigurationExtractDir(project) + "/" +
+                                spec.from(files).into(new File(project.getRootProject().getBuildDir(), "testclusters/extract") + "/" +
                                     resolvedArtifact.getModuleVersion().getId().getGroup()
                                 );
                             }));
                     }
                 });
             });
-
-            // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
-            // that are defined in the build script and the ones that will actually be used in this invocation of gradle
-            // we use this information to determine when the last task that required the cluster executed so that we can
-            // terminate the cluster right away and free up resources.
-            configureClaimClustersHook(project);
-
-            // Before each task, we determine if a cluster needs to be started for that task.
-            configureStartClustersHook(project);
-
-            // After each task we determine if there are clusters that are no longer needed.
-            configureStopClustersHook(project);
-
-            // configure hooks to make sure no test cluster processes survive the build
-            configureCleanupHooks(project);
-
-            // Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the
-            // configuration so the user doesn't have to repeat this.
-            autoConfigureClusterDependencies(project, rootProject, container);
         }
+
+        // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
+        // that are defined in the build script and the ones that will actually be used in this invocation of gradle
+        // we use this information to determine when the last task that required the cluster executed so that we can
+        // terminate the cluster right away and free up resources.
+        configureClaimClustersHook(project);
+
+        // Before each task, we determine if a cluster needs to be started for that task.
+        configureStartClustersHook(project);
+
+        // After each task we determine if there are clusters that are no longer needed.
+        configureStopClustersHook(project);
+
+        // configure hooks to make sure no test cluster processes survive the build
+        configureCleanupHooks(project);
+
+        // Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the
+        // configuration so the user doesn't have to repeat this.
+        autoConfigureClusterDependencies(project, rootProject, container);
     }
 
     private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension(Project project) {
@@ -158,7 +151,7 @@ public class TestClustersPlugin implements Plugin<Project> {
                 project.getPath(),
                 name,
                 project,
-                getTestClustersConfigurationExtractDir(project),
+                new File(project.getRootProject().getBuildDir(), "testclusters/extract"),
                 new File(project.getBuildDir(), "testclusters")
             )
         );
@@ -178,7 +171,7 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private static void createUseClusterTaskExtension(Project project) {
+    private void createUseClusterTaskExtension(Project project, NamedDomainObjectContainer<ElasticsearchCluster> container) {
         // register an extension for all current and future tasks, so that any task can declare that it wants to use a
         // specific cluster.
         project.getTasks().all((Task task) ->
@@ -187,6 +180,12 @@ public class TestClustersPlugin implements Plugin<Project> {
                     "useCluster",
                     new Closure<Void>(project, task) {
                         public void doCall(ElasticsearchCluster cluster) {
+                            if (container.contains(cluster) == false) {
+                                throw new TestClustersException(
+                                    "Task " + task.getPath() + " can't use test cluster from" +
+                                    " another project " + cluster
+                                );
+                            }
                             Object thisObject = this.getThisObject();
                             if (thisObject instanceof Task == false) {
                                 throw new AssertionError("Expected " + thisObject + " to be an instance of " +
@@ -201,35 +200,38 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private static void configureClaimClustersHook(Project project) {
-        project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
-            taskExecutionGraph.getAllTasks()
-                .forEach(task ->
-                    usedClusters.getOrDefault(task, Collections.emptyList()).forEach(each -> {
-                        synchronized (claimsInventory) {
-                            claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) + 1);
-                        }
-                        each.freeze();
-                    })
-                )
-        );
+    private void configureClaimClustersHook(Project project) {
+        // Once we know all the tasks that need to execute, we claim all the clusters that belong to those and count the
+        // claims so we'll know when it's safe to stop them.
+        project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> {
+            Set<String> forExecution = taskExecutionGraph.getAllTasks().stream()
+                .map(Task::getPath)
+                .collect(Collectors.toSet());
+
+            usedClusters.forEach((task, listOfClusters) ->
+                listOfClusters.forEach(elasticsearchCluster -> {
+                    if (forExecution.contains(task.getPath())) {
+                        elasticsearchCluster.freeze();
+                        claimsInventory.put(elasticsearchCluster, claimsInventory.getOrDefault(elasticsearchCluster, 0) + 1);
+                    }
+                }));
+
+            logger.info("Claims inventory: {}", claimsInventory);
+        });
     }
 
-    private static void configureStartClustersHook(Project project) {
+    private void configureStartClustersHook(Project project) {
         project.getGradle().addListener(
             new TaskActionListener() {
                 @Override
                 public void beforeActions(Task task) {
                     // we only start the cluster before the actions, so we'll not start it if the task is up-to-date
-                    final List<ElasticsearchCluster> clustersToStart;
-                    synchronized (runningClusters) {
-                        clustersToStart = usedClusters.getOrDefault(task,Collections.emptyList()).stream()
-                            .filter(each -> runningClusters.contains(each) == false)
-                            .collect(Collectors.toList());
-                        runningClusters.addAll(clustersToStart);
-                    }
-                    clustersToStart.forEach(ElasticsearchCluster::start);
-
+                    usedClusters.getOrDefault(task, Collections.emptyList()).stream()
+                        .filter(each -> runningClusters.contains(each) == false)
+                        .forEach(elasticsearchCluster -> {
+                            elasticsearchCluster.start();
+                            runningClusters.add(elasticsearchCluster);
+                        });
                 }
                 @Override
                 public void afterActions(Task task) {}
@@ -237,7 +239,7 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private static void configureStopClustersHook(Project project) {
+    private void configureStopClustersHook(Project project) {
         project.getGradle().addListener(
             new TaskExecutionListener() {
                 @Override
@@ -251,25 +253,19 @@ public class TestClustersPlugin implements Plugin<Project> {
                     if (state.getFailure() != null) {
                         // If the task fails, and other tasks use this cluster, the other task will likely never be
                         // executed at all, so we will never get to un-claim and terminate it.
-                        // The downside is that with multi project builds if that other  task is in a different
-                        // project and executing right now, we may terminate the cluster while it's running it.
                         clustersUsedByTask.forEach(each -> each.stop(true));
                     } else {
-                        clustersUsedByTask.forEach(each -> {
-                            synchronized (claimsInventory) {
-                                claimsInventory.put(each, claimsInventory.get(each) - 1);
-                            }
-                        });
-                        final List<ElasticsearchCluster> stoppable;
-                        synchronized (runningClusters) {
-                            stoppable = claimsInventory.entrySet().stream()
-                                .filter(entry -> entry.getValue() == 0)
-                                .filter(entry -> runningClusters.contains(entry.getKey()))
-                                .map(Map.Entry::getKey)
-                                .collect(Collectors.toList());
-                            runningClusters.removeAll(stoppable);
-                        }
-                        stoppable.forEach(each -> each.stop(false));
+                        clustersUsedByTask.forEach(
+                            each -> claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) - 1)
+                        );
+                        claimsInventory.entrySet().stream()
+                            .filter(entry -> entry.getValue() == 0)
+                            .filter(entry -> runningClusters.contains(entry.getKey()))
+                            .map(Map.Entry::getKey)
+                            .forEach(each -> {
+                                each.stop(false);
+                                runningClusters.remove(each);
+                            });
                     }
                 }
                 @Override
@@ -278,10 +274,6 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    static File getTestClustersConfigurationExtractDir(Project project) {
-        return new File(project.getRootProject().getBuildDir(), "testclusters/extract");
-    }
-
     /**
      * Boilerplate to get testClusters container extension
      *
@@ -354,15 +346,9 @@ public class TestClustersPlugin implements Plugin<Project> {
             })));
     }
 
-    private static void configureCleanupHooks(Project project) {
-        synchronized (runningClusters) {
-            if (executorService == null || executorService.isTerminated()) {
-                executorService = Executors.newSingleThreadExecutor();
-            } else {
-                throw new IllegalStateException("Trying to configure executor service twice");
-            }
-        }
+    private void configureCleanupHooks(Project project) {
         // When the Gradle daemon is used, it will interrupt all threads when the build concludes.
+        // This is our signal to clean up
         executorService.submit(() -> {
             while (true) {
                 try {
@@ -375,17 +361,21 @@ public class TestClustersPlugin implements Plugin<Project> {
             }
         });
 
-        project.getGradle().buildFinished(buildResult -> {
-            logger.info("Build finished");
-            shutdownExecutorService();
-        });
         // When the Daemon is not used, or runs into issues, rely on a shutdown hook
         // When the daemon is used, but does not work correctly and eventually dies off (e.x. due to non interruptible
         // thread in the build) process will be stopped eventually when the daemon dies.
-        Runtime.getRuntime().addShutdownHook(new Thread(TestClustersPlugin::shutDownAllClusters));
+        Runtime.getRuntime().addShutdownHook(shutdownHook);
+
+        // When we don't run into anything out of the ordinary, and the build completes, makes sure to clean up
+        project.getGradle().buildFinished(buildResult -> {
+            shutdownExecutorService();
+            if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) {
+                logger.info("Trying to deregister shutdown hook when it was not registered.");
+            }
+        });
     }
 
-    private static void shutdownExecutorService() {
+    private void shutdownExecutorService() {
         executorService.shutdownNow();
         try {
             if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) {
@@ -400,13 +390,13 @@ public class TestClustersPlugin implements Plugin<Project> {
         }
     }
 
-    private static void shutDownAllClusters() {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Shutting down all test clusters", new RuntimeException());
-        }
+    private void shutDownAllClusters() {
         synchronized (runningClusters) {
-            runningClusters.forEach(each -> each.stop(true));
-            runningClusters.clear();
+            Iterator<ElasticsearchCluster> iterator = runningClusters.iterator();
+            while (iterator.hasNext()) {
+                iterator.remove();
+                iterator.next().stop(true);
+            }
         }
     }
 

From b74d02944e1203d5817affe6826d37c62c264801 Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Fri, 12 Apr 2019 10:45:09 +0100
Subject: [PATCH 004/112] Clarify initial_master_nodes must match node.name
 (#41137)

... and emphasize that this includes any trailing qualifiers.
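
As an example, a minimal sketch of a consistent configuration (hostnames are illustrative, taken from the doc text below) where the fully-qualified `node.name` matches the bootstrap list exactly:

    # elasticsearch.yml on master-a.example.com
    node.name: master-a.example.com
    cluster.initial_master_nodes:
      - master-a.example.com
      - master-b.example.com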
---
 .../modules/discovery/bootstrapping.asciidoc  | 29 +++++++++++++++++++
 .../discovery-settings.asciidoc               |  8 ++++-
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc
index 972c0b8a064..f4a775e12df 100644
--- a/docs/reference/modules/discovery/bootstrapping.asciidoc
+++ b/docs/reference/modules/discovery/bootstrapping.asciidoc
@@ -56,6 +56,35 @@ cluster.initial_master_nodes:
   - master-node-hostname
 --------------------------------------------------
 
+[NOTE]
+==================================================
+
+The node names used in this list must exactly match the `node.name` properties
+of the nodes. By default the node name is set to the machine's hostname which
+may or may not be fully-qualified depending on your system configuration. If
+each node name is a fully-qualified domain name such as `master-a.example.com`
+then you must use fully-qualified domain names in the
+`cluster.initial_master_nodes` list too; conversely if your node names are bare
+hostnames (without the `.example.com` suffix) then you must use bare hostnames
+in the `cluster.initial_master_nodes` list. If you use a mix of fully-qualifed
+and bare hostnames, or there is some other mismatch between `node.name` and
+`cluster.initial_master_nodes`, then the cluster will not form successfully and
+you will see log messages like the following.
+
+[source,text]
+--------------------------------------------------
+[master-a.example.com] master not discovered yet, this node has
+not previously joined a bootstrapped (v7+) cluster, and this
+node must discover master-eligible nodes [master-a, master-b] to
+bootstrap a cluster: have discovered [{master-b.example.com}{...
+--------------------------------------------------
+
+This message shows the node names `master-a.example.com` and
+`master-b.example.com` as well as the `cluster.initial_master_nodes` entries
+`master-a` and `master-b`, and it is apparent that they do not match exactly.
+
+==================================================
+
 Like all node settings, it is also possible to specify the initial set of master
 nodes on the command-line that is used to start Elasticsearch:
 
diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc
index 9650a2b1abd..4edf5cfbab5 100644
--- a/docs/reference/setup/important-settings/discovery-settings.asciidoc
+++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc
@@ -58,7 +58,13 @@ cluster.initial_master_nodes:
     `transport.port` if not specified.
 <2> If a hostname resolves to multiple IP addresses then the node will attempt to
     discover other nodes at all resolved addresses.
-<3> Initial master nodes can be identified by their <<node.name,node name>>.
+<3> Initial master nodes can be identified by their <<node.name,`node.name`>>.
+    Make sure that the value here matches the `node.name` exactly. If you use a
+    fully-qualified domain name such as `master-node-a.example.com` for your
+    node names then you must use the fully-qualified name in this list;
+    conversely if `node.name` is a bare hostname without any trailing
+    qualifiers then you must also omit the trailing qualifiers in
+    `cluster.initial_master_nodes`.
 <4> Initial master nodes can also be identified by their IP address.
 <5> If multiple master nodes share an IP address then the port must be used to
     disambiguate them.

From c379206c1e2bae2a88e6d3864617263f79f76d26 Mon Sep 17 00:00:00 2001
From: Nik Everett <nik9000@gmail.com>
Date: Fri, 12 Apr 2019 10:11:14 -0400
Subject: [PATCH 005/112] Fix some documentation urls in rest-api-spec (#40618)
 (#41145)

Fixes some documentation URLs in the rest-api-spec. Some of these URLs
pointed to 404s, and a few others pointed to deprecated documentation for
which better documentation now exists. I'm not consistent about `master`
vs `current` because we're not consistent in other places, and I think we
should solve all of those at once with something a little more automatic.
---
 .../resources/rest-api-spec/api/ingest.delete_pipeline.json     | 2 +-
 .../main/resources/rest-api-spec/api/ingest.get_pipeline.json   | 2 +-
 .../main/resources/rest-api-spec/api/ingest.processor_grok.json | 2 +-
 .../main/resources/rest-api-spec/api/ingest.put_pipeline.json   | 2 +-
 .../src/main/resources/rest-api-spec/api/ingest.simulate.json   | 2 +-
 .../src/test/resources/rest-api-spec/api/license.delete.json    | 2 +-
 .../src/test/resources/rest-api-spec/api/license.get.json       | 2 +-
 .../resources/rest-api-spec/api/license.get_basic_status.json   | 2 +-
 .../resources/rest-api-spec/api/license.get_trial_status.json   | 2 +-
 .../src/test/resources/rest-api-spec/api/license.post.json      | 2 +-
 .../resources/rest-api-spec/api/license.post_start_basic.json   | 2 +-
 .../resources/rest-api-spec/api/license.post_start_trial.json   | 2 +-
 .../resources/rest-api-spec/api/migration.deprecations.json     | 2 +-
 .../resources/rest-api-spec/api/ml.find_file_structure.json     | 2 +-
 .../src/test/resources/rest-api-spec/api/monitoring.bulk.json   | 2 +-
 .../rest-api-spec/api/security.get_user_privileges.json         | 2 +-
 16 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json
index 1c515e45095..c3b51de8620 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json
@@ -1,6 +1,6 @@
 {
   "ingest.delete_pipeline": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html",
     "methods": [ "DELETE" ],
     "url": {
       "path": "/_ingest/pipeline/{id}",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json
index 31725087423..16a07e072b7 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json
@@ -1,6 +1,6 @@
 {
   "ingest.get_pipeline": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html",
     "methods": [ "GET" ],
     "url": {
       "path": "/_ingest/pipeline/{id}",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json
index 55afada7281..bf40be853e2 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json
@@ -1,6 +1,6 @@
 {
   "ingest.processor_grok": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get",
     "methods": [ "GET" ],
     "url": {
       "path": "/_ingest/processor/grok",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json
index e4c3c2eb3f9..1ea77901d8d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json
@@ -1,6 +1,6 @@
 {
   "ingest.put_pipeline": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html",
     "methods": [ "PUT" ],
     "url": {
       "path": "/_ingest/pipeline/{id}",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json
index d02f97d81dd..c16008ad6b6 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json
@@ -1,6 +1,6 @@
 {
   "ingest.simulate": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html",
     "methods": [ "GET", "POST" ],
     "url": {
       "path": "/_ingest/pipeline/_simulate",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json
index 315b283699b..a85552e5575 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json
@@ -1,6 +1,6 @@
 {
   "license.delete": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html",
     "methods": ["DELETE"],
     "url": {
       "path": "/_license",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json
index 0de1fb48536..07a570935a6 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json
@@ -1,6 +1,6 @@
 {
   "license.get": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html",
     "methods": ["GET"],
     "url": {
       "path": "/_license",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json
index e9823b44908..cfb5608ac40 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json
@@ -1,6 +1,6 @@
 {
   "license.get_basic_status": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html",
     "methods": ["GET"],
     "url": {
       "path": "/_license/basic_status",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json
index 54f6b0a8c7d..daeb4913ad9 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json
@@ -1,6 +1,6 @@
 {
   "license.get_trial_status": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html",
     "methods": ["GET"],
     "url": {
       "path": "/_license/trial_status",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json
index 23d597a3c19..1bd78a1fb6a 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json
@@ -1,6 +1,6 @@
 {
   "license.post": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html",
     "methods": ["PUT", "POST"],
     "url": {
       "path": "/_license",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json
index 2b9da7d47c6..91da4de9ff0 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json
@@ -1,6 +1,6 @@
 {
   "license.post_start_basic": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html",
     "methods": ["POST"],
     "url": {
       "path": "/_license/start_basic",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json
index d0e3afcbb1e..55a73df4946 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json
@@ -1,6 +1,6 @@
 {
   "license.post_start_trial": {
-    "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html",
     "methods": ["POST"],
     "url": {
       "path": "/_license/start_trial",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json
index 989b206919b..7c7ad54f095 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json
@@ -1,6 +1,6 @@
 {
   "migration.deprecations": {
-    "documentation": "http://www.elastic.co/guide/en/migration/current/migration-api-deprecation.html",
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html",
     "methods": [ "GET" ],
     "url": {
       "path": "/{index}/_migration/deprecations",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json
index 94b69951e32..4e5550ae824 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json
@@ -1,6 +1,6 @@
 {
   "ml.find_file_structure": {
-    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-file-structure.html",
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-find-file-structure.html",
     "methods": [ "POST" ],
     "url": {
       "path": "/_ml/find_file_structure",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json
index 55ce7b9ba61..a299e2adc4b 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json
@@ -1,6 +1,6 @@
 {
   "monitoring.bulk": {
-    "documentation": "http://www.elastic.co/guide/en/monitoring/current/appendix-api-bulk.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/es-monitoring.html",
     "methods": ["POST", "PUT"],
     "url": {
       "path": "/_monitoring/bulk",
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json
index 45f1e4a08c6..b60298f89e0 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json
@@ -1,6 +1,6 @@
 {
   "security.get_user_privileges": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html",
     "methods": [ "GET" ],
     "url": {
       "path": "/_security/user/_privileges",

From 3be413627976908e8a01922d241a0d61620c8e65 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Fri, 12 Apr 2019 11:19:21 -0400
Subject: [PATCH 006/112] [DOCS] Fix code block length for Asciidoctor
 migration (#41151)

---
 docs/reference/setup/install/docker.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc
index 83f80b569a8..9d03edb9e7e 100644
--- a/docs/reference/setup/install/docker.asciidoc
+++ b/docs/reference/setup/install/docker.asciidoc
@@ -82,7 +82,7 @@ The `vm.max_map_count` setting should be set permanently in `/etc/sysctl.conf`:
 --------------------------------------------
 $ grep vm.max_map_count /etc/sysctl.conf
 vm.max_map_count=262144
-----------------------------------
+--------------------------------------------
 
 To apply the setting on a live system type: `sysctl -w vm.max_map_count=262144`
 --

From 62af000ff69fee8f35344916bda2d50d740f4b76 Mon Sep 17 00:00:00 2001
From: Nik Everett <nik9000@gmail.com>
Date: Fri, 12 Apr 2019 11:31:08 -0400
Subject: [PATCH 007/112] Docs: Fix rendering of APIs that mention version
 (#41154)

Fixes rendering the `version_qualified` attribute in the docs. This
attribute includes `-SNAPSHOT` but does not include `-alpha` or `-beta`
and represents the `version` field as returned by `GET /` or
`GET /_cat/plugins`. Without this change we simply drop the entire line
rather than render it, so the output looks bad.
---
 docs/Versions.asciidoc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc
index 80c400e3853..f4dd7181131 100644
--- a/docs/Versions.asciidoc
+++ b/docs/Versions.asciidoc
@@ -1,4 +1,8 @@
 :version:               7.1.0
+////
+bare_version never includes -alpha or -beta
+////
+:bare_version:          7.1.0
 :major-version:         7.x
 :prev-major-version:    6.x
 :lucene_version:        8.0.0
@@ -42,6 +46,7 @@ ifeval::["{release-state}"=="unreleased"]
 :percolator-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/percolator-client/{version}-SNAPSHOT
 :matrixstats-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/aggs-matrix-stats-client/{version}-SNAPSHOT
 :rank-eval-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version}-SNAPSHOT
+:version_qualified: {bare_version}-SNAPSHOT
 endif::[]
 
 ifeval::["{release-state}"!="unreleased"]
@@ -55,6 +60,7 @@ ifeval::["{release-state}"!="unreleased"]
 :percolator-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/percolator-client/{version}
 :matrixstats-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/aggs-matrix-stats-client/{version}
 :rank-eval-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version}
+:version_qualified: {bare_version}
 endif::[]
 
 :javadoc-client: {rest-high-level-client-javadoc}/org/elasticsearch/client

From dc1c5c264755a029cc60b4b56d2655c985ab27f6 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Fri, 12 Apr 2019 12:18:20 -0400
Subject: [PATCH 008/112] [DOCS] Fix code block length for Asciidoctor
 migration (#41153)

---
 docs/reference/indices/split-index.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc
index ade0a8075d5..d79c0b0e9e2 100644
--- a/docs/reference/indices/split-index.asciidoc
+++ b/docs/reference/indices/split-index.asciidoc
@@ -92,7 +92,7 @@ PUT my_source_index
     "index.number_of_shards" : 1
   }
 }
--------------------------------------------------
+--------------------------------------------------
 // CONSOLE
 
 In order to split an index, the index must be marked as read-only,

From 9f3fae2c591b7a25e58847f86095c10ab589bf00 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Fri, 12 Apr 2019 12:26:39 -0400
Subject: [PATCH 009/112] [DOCS] Fix code block length for Asciidoctor
 migration (#41152)

---
 docs/reference/search/rank-eval.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc
index c549b5e7a68..cf23121f53b 100644
--- a/docs/reference/search/rank-eval.asciidoc
+++ b/docs/reference/search/rank-eval.asciidoc
@@ -44,7 +44,7 @@ GET /my_index/_rank_eval
       "mean_reciprocal_rank": { ... } <3>
    }
 }
-------------------------------
+-----------------------------
 // NOTCONSOLE
 
 <1> a set of typical search requests, together with their provided ratings
@@ -77,7 +77,7 @@ The request section contains several search requests typical to your application
             ]
         }
     ]
-------------------------------
+-----------------------------
 // NOTCONSOLE
 
 <1> the search requests id, used to group result details later 

From ef310886a7860fa5a2b246efff537d6fa1857756 Mon Sep 17 00:00:00 2001
From: Gordon Brown <gordon.brown@elastic.co>
Date: Fri, 12 Apr 2019 10:43:04 -0600
Subject: [PATCH 010/112] Add deprecation check for ILM poll interval <1s
 (#41096)

ILM poll intervals of less than 1 second will not be allowed, so add a
deprecation check for that.

Even though I'm pretty sure zero production clusters will do this, it's
best to be thorough.
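
As a sketch of what the new check flags (assuming the poll interval key is `indices.lifecycle.poll_interval`, the key referenced via `LIFECYCLE_POLL_INTERVAL_SETTING`), a persistent cluster setting such as:

    indices.lifecycle.poll_interval: 500ms

would be reported as a critical deprecation issue, since the value must be 1s or greater.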
---
 .../deprecation/ClusterDeprecationChecks.java | 28 +++++++++++++
 .../xpack/deprecation/DeprecationChecks.java  |  3 +-
 .../ClusterDeprecationChecksTests.java        | 40 +++++++++++++++++++
 3 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java
index 1a71f094fc1..57a474744b6 100644
--- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java
+++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java
@@ -10,6 +10,8 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.ingest.PipelineConfiguration;
@@ -23,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.search.SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING;
+import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING;
 
 public class ClusterDeprecationChecks {
     private static final Logger logger = LogManager.getLogger(ClusterDeprecationChecks.class);
@@ -88,4 +91,29 @@ public class ClusterDeprecationChecks {
         }
         return null;
     }
+
+    static DeprecationIssue checkPollIntervalTooLow(ClusterState state) {
+        String pollIntervalString = state.metaData().settings().get(LIFECYCLE_POLL_INTERVAL_SETTING.getKey());
+        if (Strings.isNullOrEmpty(pollIntervalString)) {
+            return null;
+        }
+
+        TimeValue pollInterval;
+        try {
+            pollInterval = TimeValue.parseTimeValue(pollIntervalString, LIFECYCLE_POLL_INTERVAL_SETTING.getKey());
+        } catch (IllegalArgumentException e) {
+            logger.error("Failed to parse [{}] value: [{}]", LIFECYCLE_POLL_INTERVAL_SETTING.getKey(), pollIntervalString);
+            return null;
+        }
+
+        if (pollInterval.compareTo(TimeValue.timeValueSeconds(1)) < 0) {
+            return new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
+                "Index Lifecycle Management poll interval is set too low",
+                "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-8.0.html" +
+                    "#ilm-poll-interval-limit",
+                "The Index Lifecycle Management poll interval setting [" + LIFECYCLE_POLL_INTERVAL_SETTING.getKey() + "] is " +
+                    "currently set to [" + pollIntervalString + "], but must be 1s or greater");
+        }
+        return null;
+    }
 }
diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java
index b70c7c4fa32..3a7dcd786f5 100644
--- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java
+++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java
@@ -34,7 +34,8 @@ public class DeprecationChecks {
     static List<Function<ClusterState, DeprecationIssue>> CLUSTER_SETTINGS_CHECKS =
         Collections.unmodifiableList(Arrays.asList(
             ClusterDeprecationChecks::checkUserAgentPipelines,
-            ClusterDeprecationChecks::checkTemplatesWithTooManyFields
+            ClusterDeprecationChecks::checkTemplatesWithTooManyFields,
+            ClusterDeprecationChecks::checkPollIntervalTooLow
         ));
 
 
diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java
index 990958e766c..14158082f08 100644
--- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java
+++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java
@@ -19,6 +19,7 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;
+import org.hamcrest.Matchers;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -26,6 +27,7 @@ import java.util.List;
 
 import static java.util.Collections.singletonList;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING;
 import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS;
 import static org.elasticsearch.xpack.deprecation.IndexDeprecationChecksTests.addRandomFields;
 
@@ -155,4 +157,42 @@ public class ClusterDeprecationChecksTests extends ESTestCase {
                 "to fail if fields are not explicitly specified in the query.");
         assertEquals(singletonList(expected), issues);
     }
+
+    public void testPollIntervalTooLow() {
+        {
+            final String tooLowInterval = randomTimeValue(1, 999, "ms", "micros", "nanos");
+            MetaData badMetaData = MetaData.builder()
+                .persistentSettings(Settings.builder()
+                    .put(LIFECYCLE_POLL_INTERVAL_SETTING.getKey(), tooLowInterval)
+                    .build())
+                .build();
+            ClusterState badState = ClusterState.builder(new ClusterName("test"))
+                .metaData(badMetaData)
+                .build();
+
+            DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
+                "Index Lifecycle Management poll interval is set too low",
+                "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-8.0.html" +
+                    "#ilm-poll-interval-limit",
+                "The Index Lifecycle Management poll interval setting [" + LIFECYCLE_POLL_INTERVAL_SETTING.getKey() + "] is " +
+                    "currently set to [" + tooLowInterval + "], but must be 1s or greater");
+            List<DeprecationIssue> issues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(badState));
+            assertEquals(singletonList(expected), issues);
+        }
+
+        // Test that other values are ok
+        {
+            final String okInterval = randomTimeValue(1, 9999, "d", "h", "s");
+            MetaData okMetaData = MetaData.builder()
+                .persistentSettings(Settings.builder()
+                    .put(LIFECYCLE_POLL_INTERVAL_SETTING.getKey(), okInterval)
+                    .build())
+                .build();
+            ClusterState okState = ClusterState.builder(new ClusterName("test"))
+                .metaData(okMetaData)
+                .build();
+            List<DeprecationIssue> noIssues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(okState));
+            assertThat(noIssues, Matchers.hasSize(0));
+        }
+    }
 }

From 7b190609ab59b9989cddeb8e14acfc4a22391a4e Mon Sep 17 00:00:00 2001
From: Lee Hinman <dakrone@users.noreply.github.com>
Date: Fri, 12 Apr 2019 12:15:44 -0600
Subject: [PATCH 011/112] (7.x) Use environment settings instead of state
 settings for Watcher config (#41158)

Backport of (#41087)

* Use environment settings instead of state settings for Watcher config

Prior to this we used the settings from cluster state to see whether ILM was
enabled or disabled; however, these settings don't accurately reflect the
`xpack.ilm.enabled` setting in `elasticsearch.yml`.

This commit switches to using the `Environment` settings, which correctly reflect
the ILM enabled setting.
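
The difference is easiest to see side by side. Below is a minimal sketch, not the actual
Watcher code, using a hypothetical `Setting.boolSetting` stand-in for `xpack.ilm.enabled`:
the first variant only sees values persisted into cluster-state metadata, while the second
reads the node settings provided by the `Environment` and therefore reflects
`elasticsearch.yml`.

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    class IlmEnabledCheckSketch {
        // Hypothetical stand-in for XPackSettings.INDEX_LIFECYCLE_ENABLED.
        static final Setting<Boolean> ILM_ENABLED =
            Setting.boolSetting("xpack.ilm.enabled", true, Setting.Property.NodeScope);

        // Before: only sees the setting if it was persisted into cluster-state metadata.
        static boolean ilmEnabledFromState(ClusterState state) {
            return ILM_ENABLED.get(state.metaData().settings());
        }

        // After: reads the node settings (environment.settings()), which include values
        // from elasticsearch.yml.
        static boolean ilmEnabledFromNodeSettings(Settings nodeSettings) {
            return ILM_ENABLED.get(nodeSettings);
        }
    }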

Resolves #41042
---
 .../WatcherIndexTemplateRegistryField.java    |  5 +++-
 .../elasticsearch/xpack/watcher/Watcher.java  |  2 +-
 .../support/WatcherIndexTemplateRegistry.java | 10 +++++---
 .../WatcherIndexTemplateRegistryTests.java    | 23 +++++++++++--------
 ...cherWithSecurityClientYamlTestSuiteIT.java |  2 +-
 .../SmokeTestWatcherTestSuiteIT.java          |  2 +-
 .../smoketest/WatcherRestIT.java              |  2 +-
 .../smoketest/WatcherJiraYamlTestSuiteIT.java |  2 +-
 .../WatcherPagerDutyYamlTestSuiteIT.java      |  2 +-
 .../WatcherSlackYamlTestSuiteIT.java          |  2 +-
 10 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java
index ac4b950ea05..4007b06ee7e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java
@@ -21,7 +21,10 @@ public final class WatcherIndexTemplateRegistryField {
     public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches";
     public static final String WATCHES_TEMPLATE_NAME = ".watches";
     public static final String[] TEMPLATE_NAMES = new String[] {
-            HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME
+        HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME
+    };
+    public static final String[] TEMPLATE_NAMES_NO_ILM = new String[] {
+        HISTORY_TEMPLATE_NAME_NO_ILM, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME
     };
 
     private WatcherIndexTemplateRegistryField() {}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
index 6888019b269..f5f12d4fd24 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
@@ -267,7 +267,7 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa
             throw new UncheckedIOException(e);
         }
 
-        new WatcherIndexTemplateRegistry(clusterService, threadPool, client, xContentRegistry);
+        new WatcherIndexTemplateRegistry(environment.settings(), clusterService, threadPool, client, xContentRegistry);
 
         // http client
         httpClient = new HttpClient(settings, getSslService(), cryptoService, clusterService);
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java
index 735bf04c721..4ebcc5a8f41 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java
@@ -17,6 +17,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -63,14 +64,17 @@ public class WatcherIndexTemplateRegistry implements ClusterStateListener {
 
     private static final Logger logger = LogManager.getLogger(WatcherIndexTemplateRegistry.class);
 
+    private final Settings nodeSettings;
     private final Client client;
     private final ThreadPool threadPool;
     private final NamedXContentRegistry xContentRegistry;
     private final ConcurrentMap<String, AtomicBoolean> templateCreationsInProgress = new ConcurrentHashMap<>();
     private final AtomicBoolean historyPolicyCreationInProgress = new AtomicBoolean();
 
-    public WatcherIndexTemplateRegistry(ClusterService clusterService, ThreadPool threadPool, Client client,
+    public WatcherIndexTemplateRegistry(Settings nodeSettings, ClusterService clusterService,
+                                        ThreadPool threadPool, Client client,
                                         NamedXContentRegistry xContentRegistry) {
+        this.nodeSettings = nodeSettings;
         this.client = client;
         this.threadPool = threadPool;
         this.xContentRegistry = xContentRegistry;
@@ -104,7 +108,7 @@ public class WatcherIndexTemplateRegistry implements ClusterStateListener {
     }
 
     private void addTemplatesIfMissing(ClusterState state) {
-        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(state.metaData().settings());
+        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(this.nodeSettings);
         final TemplateConfig[] indexTemplates = ilmSupported ? TEMPLATE_CONFIGS : TEMPLATE_CONFIGS_NO_ILM;
         for (TemplateConfig template : indexTemplates) {
             final String templateName = template.getTemplateName();
@@ -153,7 +157,7 @@ public class WatcherIndexTemplateRegistry implements ClusterStateListener {
     }
 
     private void addIndexLifecyclePolicyIfMissing(ClusterState state) {
-        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(state.metaData().settings());
+        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(this.nodeSettings);
         if (ilmSupported && historyPolicyCreationInProgress.compareAndSet(false, true)) {
             final LifecyclePolicy policyOnDisk = loadWatcherHistoryPolicy();
 
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
index 7ede5313053..bd55e757953 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
@@ -73,11 +73,13 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
 
     private WatcherIndexTemplateRegistry registry;
     private NamedXContentRegistry xContentRegistry;
+    private ClusterService clusterService;
+    private ThreadPool threadPool;
     private Client client;
 
     @Before
     public void createRegistryAndClient() {
-        ThreadPool threadPool = mock(ThreadPool.class);
+        threadPool = mock(ThreadPool.class);
         when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
         when(threadPool.generic()).thenReturn(EsExecutors.newDirectExecutorService());
 
@@ -94,14 +96,14 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
             return null;
         }).when(indicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class));
 
-        ClusterService clusterService = mock(ClusterService.class);
+        clusterService = mock(ClusterService.class);
         List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
         entries.addAll(Arrays.asList(
             new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE),
                 (p) -> TimeseriesLifecycleType.INSTANCE),
             new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)));
         xContentRegistry = new NamedXContentRegistry(entries);
-        registry = new WatcherIndexTemplateRegistry(clusterService, threadPool, client, xContentRegistry);
+        registry = new WatcherIndexTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry);
     }
 
     public void testThatNonExistingTemplatesAreAddedImmediately() {
@@ -130,9 +132,10 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
         DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT);
         DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
 
-        ClusterChangedEvent event = createClusterChangedEvent(Settings.builder()
+        registry = new WatcherIndexTemplateRegistry(Settings.builder()
             .put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), false).build(),
-            Collections.emptyList(), Collections.emptyMap(), nodes);
+            clusterService, threadPool, client, xContentRegistry);
+        ClusterChangedEvent event = createClusterChangedEvent(Settings.EMPTY, Collections.emptyList(), Collections.emptyMap(), nodes);
         registry.clusterChanged(event);
         ArgumentCaptor<PutIndexTemplateRequest> argumentCaptor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
         verify(client.admin().indices(), times(3)).putTemplate(argumentCaptor.capture(), anyObject());
@@ -142,8 +145,9 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
             WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME), nodes);
         registry.clusterChanged(newEvent);
         ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
-        verify(client.admin().indices(), times(4)).putTemplate(captor.capture(), anyObject());
+        verify(client.admin().indices(), times(5)).putTemplate(captor.capture(), anyObject());
         captor.getAllValues().forEach(req -> assertNull(req.settings().get("index.lifecycle.name")));
+        verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject());
     }
 
     public void testThatNonExistingPoliciesAreAddedImmediately() {
@@ -171,9 +175,10 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
         DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT);
         DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
 
-        ClusterChangedEvent event = createClusterChangedEvent(Settings.builder()
-                .put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), false).build(),
-            Collections.emptyList(), Collections.emptyMap(), nodes); 
+        registry = new WatcherIndexTemplateRegistry(Settings.builder()
+            .put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), false).build(),
+            clusterService, threadPool, client, xContentRegistry);
+        ClusterChangedEvent event = createClusterChangedEvent(Settings.EMPTY, Collections.emptyList(), Collections.emptyMap(), nodes);
         registry.clusterChanged(event);
         verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject());
     }
diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
index 5eabd512dc5..879be233fa1 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
@@ -76,7 +76,7 @@ public class SmokeTestWatcherWithSecurityClientYamlTestSuiteIT extends ESClientY
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) {
+            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) {
                 ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
                         singletonMap("name", template), emptyList(), emptyMap());
                 assertThat(templateExistsResponse.getStatusCode(), is(200));
diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java
index a7350fcff03..8f30ec41711 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java
@@ -63,7 +63,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase {
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) {
+            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) {
                 Response templateExistsResponse = adminClient().performRequest(new Request("HEAD", "/_template/" + template));
                 assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200));
             }
diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java
index 1d7759b28b9..19c82c8cef7 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java
+++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java
@@ -58,7 +58,7 @@ public class WatcherRestIT extends ESClientYamlSuiteTestCase {
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) {
+            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) {
                 ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
                     singletonMap("name", template), emptyList(), emptyMap());
                 assertThat(templateExistsResponse.getStatusCode(), is(200));
diff --git a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java
index 0eca4d03dfd..8f8792f2697 100644
--- a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java
+++ b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java
@@ -37,7 +37,7 @@ public class WatcherJiraYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
     @Before
     public void startWatcher() throws Exception {
-        final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
+        final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM);
         assertBusy(() -> {
             try {
                 getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap());
diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java
index e111bbd1069..b9a628f71f9 100644
--- a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java
+++ b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java
@@ -37,7 +37,7 @@ public class WatcherPagerDutyYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
     @Before
     public void startWatcher() throws Exception {
-        final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
+        final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM);
         assertBusy(() -> {
             try {
                 getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap());
diff --git a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java
index 7021548109f..01eeae442b2 100644
--- a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java
+++ b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java
@@ -37,7 +37,7 @@ public class WatcherSlackYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
     @Before
     public void startWatcher() throws Exception {
-        final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES);
+        final List<String> watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM);
         assertBusy(() -> {
             try {
                 getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap());

From e7375368d6e365343f8b21a09b6baef8ff61d304 Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Sat, 13 Apr 2019 04:36:29 +1000
Subject: [PATCH 012/112] Remove nested loop in IndicesStatsResponse (#40988)
 (#41138)

This commit removes the nested loop in `getIndices`.
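
The old implementation scanned the full shard list once per index; the new one makes a
single pass and groups shards by index as it goes. A minimal, self-contained sketch of
that pattern (using a hypothetical `ShardStat` stand-in rather than the real
`ShardStats`/`IndexStats` types):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class GroupShardsByIndexSketch {

        // Hypothetical per-shard stats holder, standing in for ShardStats.
        static class ShardStat {
            final String indexName;
            ShardStat(String indexName) { this.indexName = indexName; }
        }

        // Single pass: each shard is appended to the bucket for its index, creating the
        // bucket on first use via computeIfAbsent, instead of re-scanning all shards per index.
        static Map<String, List<ShardStat>> groupByIndex(List<ShardStat> shards) {
            Map<String, List<ShardStat>> byIndex = new HashMap<>();
            for (ShardStat shard : shards) {
                byIndex.computeIfAbsent(shard.indexName, k -> new ArrayList<>()).add(shard);
            }
            return byIndex;
        }
    }
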
---
 .../admin/indices/stats/IndexStats.java       | 20 ++++++
 .../indices/stats/IndicesStatsResponse.java   | 28 +++-----
 .../stats/IndicesStatsResponseTests.java      | 71 ++++++++++++++++++-
 3 files changed, 99 insertions(+), 20 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
index a36821a4b65..c66cbc38850 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
@@ -108,4 +108,24 @@ public class IndexStats implements Iterable<IndexShardStats> {
         primary = stats;
         return stats;
     }
+
+    public static class IndexStatsBuilder {
+        private final String indexName;
+        private final String uuid;
+        private final List<ShardStats> shards = new ArrayList<>();
+
+        public IndexStatsBuilder(String indexName, String uuid) {
+            this.indexName = indexName;
+            this.uuid = uuid;
+        }
+
+        public IndexStatsBuilder add(ShardStats shardStats) {
+            shards.add(shardStats);
+            return this;
+        }
+
+        public IndexStats build() {
+            return new IndexStats(indexName, uuid, shards.toArray(new ShardStats[shards.size()]));
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
index cc563948160..0540bc3ad5c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.admin.indices.stats;
 
+import org.elasticsearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -29,12 +30,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.Index;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
+import java.util.stream.Collectors;
 
 import static java.util.Collections.unmodifiableMap;
 
@@ -83,26 +82,17 @@ public class IndicesStatsResponse extends BroadcastResponse {
         if (indicesStats != null) {
             return indicesStats;
         }
-        Map<String, IndexStats> indicesStats = new HashMap<>();
 
-        Set<Index> indices = new HashSet<>();
+        final Map<String, IndexStatsBuilder> indexToIndexStatsBuilder = new HashMap<>();
         for (ShardStats shard : shards) {
-            indices.add(shard.getShardRouting().index());
+            Index index = shard.getShardRouting().index();
+            IndexStatsBuilder indexStatsBuilder = indexToIndexStatsBuilder.computeIfAbsent(index.getName(),
+                    k -> new IndexStatsBuilder(k, index.getUUID()));
+            indexStatsBuilder.add(shard);
         }
 
-        for (Index index : indices) {
-            List<ShardStats> shards = new ArrayList<>();
-            String indexName = index.getName();
-            for (ShardStats shard : this.shards) {
-                if (shard.getShardRouting().getIndexName().equals(indexName)) {
-                    shards.add(shard);
-                }
-            }
-            indicesStats.put(
-                indexName, new IndexStats(indexName, index.getUUID(), shards.toArray(new ShardStats[shards.size()]))
-            );
-        }
-        this.indicesStats = indicesStats;
+        indicesStats = indexToIndexStatsBuilder.entrySet().stream()
+                .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().build()));
         return indicesStats;
     }
 
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java
index a7e3ee57a08..99850699ec2 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java
@@ -19,16 +19,30 @@
 
 package org.elasticsearch.action.admin.indices.stats;
 
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardPath;
 import org.elasticsearch.test.ESTestCase;
 
+import java.nio.file.Path;
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 
 import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.object.HasToString.hasToString;
 
-
 public class IndicesStatsResponseTests extends ESTestCase {
 
     public void testInvalidLevel() {
@@ -42,4 +56,59 @@ public class IndicesStatsResponseTests extends ESTestCase {
             hasToString(containsString("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]")));
     }
 
+    public void testGetIndices() {
+        List<ShardStats> shards = new ArrayList<>();
+        int noOfIndexes = randomIntBetween(2, 5);
+        List<String> expectedIndexes = new ArrayList<>();
+        Map<String, AtomicLong> expectedIndexToPrimaryShardsCount = new HashMap<>();
+        Map<String, AtomicLong> expectedIndexToTotalShardsCount = new HashMap<>();
+
+        for (int indCnt = 0; indCnt < noOfIndexes; indCnt++) {
+            Index index = createIndex(randomAlphaOfLength(9));
+            expectedIndexes.add(index.getName());
+            int numShards = randomIntBetween(1, 5);
+            for (int shardId = 0; shardId < numShards; shardId++) {
+                ShardId shId = new ShardId(index, shardId);
+                Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardId));
+                ShardPath shardPath = new ShardPath(false, path, path, shId);
+                ShardRouting routing = createShardRouting(index, shId, (shardId == 0));
+                shards.add(new ShardStats(routing, shardPath, null, null, null, null));
+                AtomicLong primaryShardsCounter = expectedIndexToPrimaryShardsCount.computeIfAbsent(index.getName(),
+                        k -> new AtomicLong(0L));
+                if (routing.primary()) {
+                    primaryShardsCounter.incrementAndGet();
+                }
+                AtomicLong shardsCounter = expectedIndexToTotalShardsCount.computeIfAbsent(index.getName(), k -> new AtomicLong(0L));
+                shardsCounter.incrementAndGet();
+            }
+        }
+        final IndicesStatsResponse indicesStatsResponse = new IndicesStatsResponse(shards.toArray(new ShardStats[shards.size()]), 0, 0, 0,
+                null);
+        Map<String, IndexStats> indexStats = indicesStatsResponse.getIndices();
+
+        assertThat(indexStats.size(), is(noOfIndexes));
+        assertThat(indexStats.keySet(), containsInAnyOrder(expectedIndexes.toArray(new String[0])));
+
+        for (String index : indexStats.keySet()) {
+            IndexStats stat = indexStats.get(index);
+            ShardStats[] shardStats = stat.getShards();
+            long primaryCount = 0L;
+            long totalCount = shardStats.length;
+            for (ShardStats shardStat : shardStats) {
+                if (shardStat.getShardRouting().primary()) {
+                    primaryCount++;
+                }
+            }
+            assertThat(primaryCount, is(expectedIndexToPrimaryShardsCount.get(index).get()));
+            assertThat(totalCount, is(expectedIndexToTotalShardsCount.get(index).get()));
+        }
+    }
+
+    private ShardRouting createShardRouting(Index index, ShardId shardId, boolean isPrimary) {
+        return TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(4), isPrimary, ShardRoutingState.STARTED);
+    }
+
+    private Index createIndex(String indexName) {
+        return new Index(indexName, UUIDs.base64UUID());
+    }
 }

From 47ba45732d173a5bb45309bf53e19ceaf44a35b1 Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Sat, 13 Apr 2019 04:37:25 +1000
Subject: [PATCH 013/112] Find and use non local IPv4 address while testing IP
 filtering (#40234) (#41141)

For pattern "n:localhost" PatternRule#isLocalhost() matches
any local address, loopback address.
[Note: I think for "localhost" this should not consider IP address
as a match when they are bound to network interfaces. It should just
be loopback address check unless the intent is to match all local addresses.
This class is adopted from Netty3 and I am not sure if this is intended
behavior or maybe I am missing something]

For now I have fixed this assuming the PatternRule#isLocalhost check is
correct by avoiding use of local address to check address denied.
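
A minimal sketch of the two notions of "local" involved here, using plain `java.net`
APIs rather than the PatternRule code itself:

    import java.net.InetAddress;
    import java.net.NetworkInterface;
    import java.net.SocketException;

    class LocalAddressCheckSketch {

        // Narrow check: only loopback addresses (127.0.0.0/8, ::1) count as "localhost".
        static boolean isLoopbackOnly(InetAddress address) {
            return address.isLoopbackAddress();
        }

        // Broad check: the wildcard address, loopback addresses, and any address bound to
        // one of this machine's network interfaces all count as "local". This is the
        // behaviour described above for PatternRule#isLocalhost(), which is why the test
        // can no longer use an arbitrary interface-bound address as the "denied" address.
        static boolean isAnyLocalAddress(InetAddress address) throws SocketException {
            return address.isAnyLocalAddress()
                || address.isLoopbackAddress()
                || NetworkInterface.getByInetAddress(address) != null;
        }
    }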

Closes #40194
---
 .../transport/filter/IPFilterTests.java       | 29 +++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java
index 78825d95ce0..e3777fc8545 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.security.transport.filter;
 
+import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.network.InetAddresses;
 import org.elasticsearch.common.network.NetworkAddress;
@@ -26,6 +27,9 @@ import org.mockito.ArgumentCaptor;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -35,6 +39,7 @@ import java.util.Locale;
 import java.util.Map;
 
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
@@ -140,7 +145,8 @@ public class IPFilterTests extends ESTestCase {
         ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState);
         ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses());
         assertAddressIsAllowed("127.0.0.1");
-        assertAddressIsDenied("192.168.0.1");
+        // when "localhost" is used, ES considers all local addresses see PatternRule#isLocalhost()
+        assertAddressIsDenied(randomNonLocalIPv4Address());
         assertAddressIsAllowedForProfile("client", "192.168.0.1");
         assertAddressIsDeniedForProfile("client", "192.168.0.2");
     }
@@ -161,7 +167,8 @@ public class IPFilterTests extends ESTestCase {
         clusterSettings.updateDynamicSettings(newSettings, updatedSettingsBuilder, Settings.builder(), "test");
         clusterSettings.applySettings(updatedSettingsBuilder.build());
         assertAddressIsAllowed("127.0.0.1");
-        assertAddressIsDenied("192.168.0.1");
+        // when "localhost" is used, ES considers all local addresses see PatternRule#isLocalhost()
+        assertAddressIsDenied(randomNonLocalIPv4Address());
         assertAddressIsAllowedForProfile("client", "192.168.0.1", "192.168.0.2");
         assertAddressIsDeniedForProfile("client", "192.168.0.3");
     }
@@ -297,4 +304,22 @@ public class IPFilterTests extends ESTestCase {
     private void assertAddressIsDenied(String ... inetAddresses) {
         assertAddressIsDeniedForProfile("default", inetAddresses);
     }
+
+    private String randomNonLocalIPv4Address() throws SocketException, UnknownHostException {
+        String ipv4Address = null;
+        int noOfRetries = 0;
+        do {
+            noOfRetries++;
+            final InetAddress address = InetAddress.getByAddress(Numbers.intToBytes(randomInt()));
+            if (address.isAnyLocalAddress() || address.isLoopbackAddress() || NetworkInterface.getByInetAddress(address) != null) {
+                continue;
+            } else {
+                ipv4Address = NetworkAddress.format(address);
+                break;
+            }
+        } while (ipv4Address == null && noOfRetries < 25);
+        assertThat("could not generate random IPv4 address which is not local address", ipv4Address, notNullValue());
+        return ipv4Address;
+    }
+
 }

From c8bc4ab0033f6fa485ec1e3b37b50c7029fbc148 Mon Sep 17 00:00:00 2001
From: Gordon Brown <gordon.brown@elastic.co>
Date: Fri, 12 Apr 2019 16:53:50 -0600
Subject: [PATCH 014/112] Improve Watcher test framework resiliency (#40658)

It is possible for the watches tracked by ScheduleTriggerEngineMock to
get out of sync with the Watches in the ScheduleTriggerEngine
production code, which can lead to watches failing to run.

This commit:

1. Changes TimeWarp to try to run the watch on all schedulers, rather than stopping after the first one that claims to have the watch registered. This reduces the impact of desynchronization between the mocking code and the backing production code (see the sketch after this list).
2. Makes ScheduleTriggerEngineMock respect pauses of execution again. This is necessary to prevent duplicate watch invocations due to the above change.
3. Tweaks how watches are registered in ScheduleTriggerEngineMock to prevent race conditions due to concurrent modification.
4. Tweaks WatcherConcreteIndexTests to use TimeWarp instead of waiting for watches to be triggered, as TimeWarp is more reliable and accomplishes the same goal.
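
A standalone sketch of the TimeWarp change from item 1, using a hypothetical
`Scheduler` interface instead of the real test classes: every scheduler is asked to
trigger the watch, and the test only requires that at least one of them fired it.

    import java.util.List;

    class TriggerOnAllSchedulersSketch {

        // Hypothetical stand-in for ScheduleTriggerEngineMock: trigger() returns true if
        // this scheduler has the watch registered and fired it.
        interface Scheduler {
            boolean trigger(String watchId);
        }

        // Ask every scheduler rather than stopping at the first match, so a scheduler whose
        // registered-watch map has drifted out of sync cannot hide the watch.
        static void trigger(List<Scheduler> schedulers, String watchId) {
            long triggeredCount = schedulers.stream()
                .filter(scheduler -> scheduler.trigger(watchId))
                .count();
            if (triggeredCount < 1) {
                throw new AssertionError("could not find watch [" + watchId + "] to trigger");
            }
        }
    }
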
---
 .../watcher/WatcherConcreteIndexTests.java    |  9 +++--
 .../webhook/WebhookHttpsIntegrationTests.java |  3 +-
 .../webhook/WebhookIntegrationTests.java      |  4 +--
 .../AbstractWatcherIntegrationTestCase.java   | 14 ++++++--
 .../test/integration/BasicWatcherTests.java   |  2 --
 .../HttpSecretsIntegrationTests.java          |  1 -
 .../test/integration/WatchAckTests.java       |  1 -
 .../test/integration/WatchMetadataTests.java  |  1 -
 .../trigger/ScheduleTriggerEngineMock.java    | 35 ++++++++++++-------
 9 files changed, 39 insertions(+), 31 deletions(-)

diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java
index e6b253d1739..237c0a2bdf1 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java
@@ -23,11 +23,6 @@ import static org.hamcrest.Matchers.greaterThan;
 
 public class WatcherConcreteIndexTests extends AbstractWatcherIntegrationTestCase {
 
-    @Override
-    protected boolean timeWarped() {
-        return false;
-    }
-
     public void testCanUseAnyConcreteIndexName() throws Exception {
         String newWatcherIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
         String watchResultsIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT);
@@ -35,6 +30,7 @@ public class WatcherConcreteIndexTests extends AbstractWatcherIntegrationTestCas
 
         stopWatcher();
         replaceWatcherIndexWithRandomlyNamedIndex(Watch.INDEX, newWatcherIndexName);
+        ensureGreen(newWatcherIndexName);
         startWatcher();
 
         PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("mywatch").setSource(watchBuilder()
@@ -45,6 +41,9 @@ public class WatcherConcreteIndexTests extends AbstractWatcherIntegrationTestCas
             .get();
 
         assertTrue(putWatchResponse.isCreated());
+        refresh();
+
+        timeWarp().trigger("mywatch");
 
         assertBusy(() -> {
             SearchResponse searchResult = client().prepareSearch(watchResultsIndex).setTrackTotalHits(true).get();
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java
index adbf4314032..bdaa2377fd1 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java
@@ -15,10 +15,10 @@ import org.elasticsearch.xpack.core.ssl.TestsSSLService;
 import org.elasticsearch.xpack.core.watcher.history.WatchRecord;
 import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
 import org.elasticsearch.xpack.watcher.actions.ActionBuilders;
+import org.elasticsearch.xpack.watcher.common.http.BasicAuth;
 import org.elasticsearch.xpack.watcher.common.http.HttpMethod;
 import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate;
 import org.elasticsearch.xpack.watcher.common.http.Scheme;
-import org.elasticsearch.xpack.watcher.common.http.BasicAuth;
 import org.elasticsearch.xpack.watcher.common.text.TextTemplate;
 import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
 import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
@@ -67,7 +67,6 @@ public class WebhookHttpsIntegrationTests extends AbstractWatcherIntegrationTest
         webServer.close();
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503")
     public void testHttps() throws Exception {
         webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body"));
         HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort())
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java
index 521cc2d49fc..2c961db6187 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.watcher.actions.webhook;
 
-import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -18,9 +17,9 @@ import org.elasticsearch.transport.Netty4Plugin;
 import org.elasticsearch.xpack.core.watcher.history.WatchRecord;
 import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
 import org.elasticsearch.xpack.watcher.actions.ActionBuilders;
+import org.elasticsearch.xpack.watcher.common.http.BasicAuth;
 import org.elasticsearch.xpack.watcher.common.http.HttpMethod;
 import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate;
-import org.elasticsearch.xpack.watcher.common.http.BasicAuth;
 import org.elasticsearch.xpack.watcher.common.text.TextTemplate;
 import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
 import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
@@ -44,7 +43,6 @@ import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
 
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503")
 public class WebhookIntegrationTests extends AbstractWatcherIntegrationTestCase {
 
     private MockWebServer webServer = new MockWebServer();
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java
index 21e5751029f..8c44ba831b3 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java
@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.watcher.test;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -580,6 +582,7 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase
     }
 
     protected static class TimeWarp {
+        private static final Logger logger = LogManager.getLogger(TimeWarp.class);
 
         private final List<ScheduleTriggerEngineMock> schedulers;
         private final ClockMock clock;
@@ -598,9 +601,14 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase
         }
 
         public void trigger(String watchId, int times, TimeValue timeValue) {
-            boolean isTriggered = schedulers.stream().anyMatch(scheduler -> scheduler.trigger(watchId, times, timeValue));
-            String msg = String.format(Locale.ROOT, "could not find watch [%s] to trigger", watchId);
-            assertThat(msg, isTriggered, is(true));
+            long triggeredCount = schedulers.stream()
+                .filter(scheduler -> scheduler.trigger(watchId, times, timeValue))
+                .count();
+            String msg = String.format(Locale.ROOT, "watch was triggered on [%d] schedulers, expected [1]", triggeredCount);
+            if (triggeredCount > 1) {
+                logger.warn(msg);
+            }
+            assertThat(msg, triggeredCount, greaterThanOrEqualTo(1L));
         }
     }
 
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java
index 05d8b4ef29d..2f2299d7d65 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.watcher.test.integration;
 
-import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
@@ -63,7 +62,6 @@ import static org.hamcrest.Matchers.notNullValue;
 
 @TestLogging("org.elasticsearch.xpack.watcher:DEBUG," +
              "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503")
 public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase {
 
     public void testIndexWatch() throws Exception {
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java
index 3eefa031371..f8ddc3065f7 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java
@@ -87,7 +87,6 @@ public class HttpSecretsIntegrationTests extends AbstractWatcherIntegrationTestC
         return super.nodeSettings(nodeOrdinal);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40587")
     public void testHttpInput() throws Exception {
         WatcherClient watcherClient = watcherClient();
         watcherClient.preparePutWatch("_id")
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java
index 0e95a15b2a3..a0ef5e97d85 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java
@@ -122,7 +122,6 @@ public class WatchAckTests extends AbstractWatcherIntegrationTestCase {
         assertThat(throttledCount, greaterThan(0L));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35506")
     public void testAckAllActions() throws Exception {
         PutWatchResponse putWatchResponse = watcherClient().preparePutWatch()
                 .setId("_id")
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java
index aff3a62c12c..1e2c1ddbc64 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java
@@ -38,7 +38,6 @@ import static org.hamcrest.Matchers.greaterThan;
 
 public class WatchMetadataTests extends AbstractWatcherIntegrationTestCase {
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40631")
     public void testWatchMetadata() throws Exception {
         Map<String, Object> metadata = new HashMap<>();
         metadata.put("foo", "bar");
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java
index f58954658fc..3e46f7102c1 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java
@@ -21,8 +21,10 @@ import java.time.Clock;
 import java.time.ZonedDateTime;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 /**
  * A mock scheduler to help with unit testing. Provide {@link ScheduleTriggerEngineMock#trigger} method to manually trigger
@@ -31,7 +33,8 @@ import java.util.concurrent.ConcurrentMap;
 public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine {
     private static final Logger logger = LogManager.getLogger(ScheduleTriggerEngineMock.class);
 
-    private final ConcurrentMap<String, Watch> watches = new ConcurrentHashMap<>();
+    private final AtomicReference<Map<String, Watch>> watches = new AtomicReference<>(new ConcurrentHashMap<>());
+    private final AtomicBoolean paused = new AtomicBoolean(false);
 
     public ScheduleTriggerEngineMock(ScheduleRegistry scheduleRegistry, Clock clock) {
         super(scheduleRegistry, clock);
@@ -49,30 +52,32 @@ public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine {
     }
 
     @Override
-    public void start(Collection<Watch> jobs) {
-        jobs.forEach(this::add);
+    public synchronized void start(Collection<Watch> jobs) {
+        Map<String, Watch> newWatches = new ConcurrentHashMap<>();
+        jobs.forEach((watch) -> newWatches.put(watch.id(), watch));
+        watches.set(newWatches);
+        paused.set(false);
     }
 
     @Override
     public void stop() {
-        watches.clear();
+        watches.set(new ConcurrentHashMap<>());
     }
 
     @Override
-    public void add(Watch watch) {
+    public synchronized void add(Watch watch) {
         logger.debug("adding watch [{}]", watch.id());
-        watches.put(watch.id(), watch);
+        watches.get().put(watch.id(), watch);
     }
 
     @Override
     public void pauseExecution() {
-        // No action is needed because this engine does not trigger watches on a schedule (instead
-        // they must be triggered manually).
+        paused.set(true);
     }
 
     @Override
-    public boolean remove(String jobId) {
-        return watches.remove(jobId) != null;
+    public synchronized boolean remove(String jobId) {
+        return watches.get().remove(jobId) != null;
     }
 
     public boolean trigger(String jobName) {
@@ -80,7 +85,11 @@ public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine {
     }
 
     public boolean trigger(String jobName, int times, TimeValue interval) {
-        if (watches.containsKey(jobName) == false) {
+        if (watches.get().containsKey(jobName) == false) {
+            return false;
+        }
+        if (paused.get()) {
+            logger.info("not executing watch [{}] on this scheduler because it is paused", jobName);
             return false;
         }
 
@@ -89,7 +98,7 @@ public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine {
             logger.debug("firing watch [{}] at [{}]", jobName, now);
             ScheduleTriggerEvent event = new ScheduleTriggerEvent(jobName, now, now);
             consumers.forEach(consumer -> consumer.accept(Collections.singletonList(event)));
-            if (interval != null)  {
+            if (interval != null) {
                 if (clock instanceof ClockMock) {
                     ((ClockMock) clock).fastForward(interval);
                 } else {

From 5ef247dc91a42093e18d45e71fb8785da11f58a5 Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Sun, 14 Apr 2019 10:39:50 +0100
Subject: [PATCH 015/112] Further clarify cluster.initial_master_nodes (#41179)

The following phrase causes confusion:

> Alternatively the IP addresses or hostnames (if node name defaults to the
> host name) can be used.

This change clarifies the conditions under which you can use a hostname, and
adds an anchor to the note introduced in (#41137) so we can link directly to it
in conversations with users.
---
 .../modules/discovery/bootstrapping.asciidoc  | 62 ++++++++++---------
 .../discovery-settings.asciidoc               | 14 +++--
 2 files changed, 42 insertions(+), 34 deletions(-)

diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc
index f4a775e12df..0ba7d4b17ce 100644
--- a/docs/reference/modules/discovery/bootstrapping.asciidoc
+++ b/docs/reference/modules/discovery/bootstrapping.asciidoc
@@ -9,16 +9,21 @@ up: nodes that have already joined a cluster store this information in their
 data folder and freshly-started nodes that are joining an existing cluster
 obtain this information from the cluster's elected master. 
 
-The initial set of master-eligible nodes is defined in the 
-<<initial_master_nodes,`cluster.initial_master_nodes` setting>>. When you
-start a master-eligible node, you can provide this setting on the command line
-or in the `elasticsearch.yml` file. After the cluster has formed, this setting
-is no longer required and is ignored. It need not be set
-on master-ineligible nodes, nor on master-eligible nodes that are started to
-join an existing cluster. Note that master-eligible nodes should use storage
-that persists across restarts. If they do not, and
-`cluster.initial_master_nodes` is set, and a full cluster restart occurs, then
-another brand-new cluster will form and this may result in data loss.
+The initial set of master-eligible nodes is defined in the
+<<initial_master_nodes,`cluster.initial_master_nodes` setting>>. This is a list
+of the <<node.name,node names>> or IP addresses of the master-eligible nodes in
+the new cluster. If you do not configure `node.name` then it is set to the
+node's hostname, so in this case you can use hostnames in
+`cluster.initial_master_nodes` too.
+
+When you start a master-eligible node, you can provide this setting on the
+command line or in the `elasticsearch.yml` file. After the cluster has formed,
+this setting is no longer required and is ignored. It need not be set on
+master-ineligible nodes, nor on master-eligible nodes that are started to join
+an existing cluster. Note that master-eligible nodes should use storage that
+persists across restarts. If they do not, and `cluster.initial_master_nodes` is
+set, and a full cluster restart occurs, then another brand-new cluster will
+form and this may result in data loss.
 
 It is technically sufficient to set `cluster.initial_master_nodes` on a single
 master-eligible node in the cluster, and only to mention that single node in the
@@ -42,10 +47,9 @@ cluster.initial_master_nodes:
   - master-c
 --------------------------------------------------
 
-Alternatively the IP addresses or hostnames (<<node.name,if node name defaults
-to the host name>>) can be used. If there is more than one Elasticsearch node
-with the same IP address or hostname then the transport ports must also be given
-to specify exactly which node is meant:
+You can use a mix of IP addresses and node names too. If there is more than one
+Elasticsearch node with the same IP address then the transport port must also
+be given to specify exactly which node is meant:
 
 [source,yaml]
 --------------------------------------------------
@@ -56,14 +60,23 @@ cluster.initial_master_nodes:
   - master-node-hostname
 --------------------------------------------------
 
+Like all node settings, it is also possible to specify the initial set of master
+nodes on the command-line that is used to start Elasticsearch:
+
+[source,bash]
+--------------------------------------------------
+$ bin/elasticsearch -Ecluster.initial_master_nodes=master-a,master-b,master-c
+--------------------------------------------------
+
 [NOTE]
 ==================================================
 
-The node names used in this list must exactly match the `node.name` properties
-of the nodes. By default the node name is set to the machine's hostname which
-may or may not be fully-qualified depending on your system configuration. If
-each node name is a fully-qualified domain name such as `master-a.example.com`
-then you must use fully-qualified domain names in the
+[[modules-discovery-bootstrap-cluster-fqdns]] The node names used in the
+`cluster.initial_master_nodes` list must exactly match the `node.name`
+properties of the nodes. By default the node name is set to the machine's
+hostname which may or may not be fully-qualified depending on your system
+configuration. If each node name is a fully-qualified domain name such as
+`master-a.example.com` then you must use fully-qualified domain names in the
 `cluster.initial_master_nodes` list too; conversely if your node names are bare
 hostnames (without the `.example.com` suffix) then you must use bare hostnames
 in the `cluster.initial_master_nodes` list. If you use a mix of fully-qualified
@@ -81,18 +94,11 @@ bootstrap a cluster: have discovered [{master-b.example.com}{...
 
 This message shows the node names `master-a.example.com` and
 `master-b.example.com` as well as the `cluster.initial_master_nodes` entries
-`master-a` and `master-b`, and it is apparent that they do not match exactly.
+`master-a` and `master-b`, and it is clear from this message that they do not
+match exactly.
 
 ==================================================
 
-Like all node settings, it is also possible to specify the initial set of master
-nodes on the command-line that is used to start Elasticsearch:
-
-[source,bash]
---------------------------------------------------
-$ bin/elasticsearch -Ecluster.initial_master_nodes=master-a,master-b,master-c
---------------------------------------------------
-
 [float]
 ==== Choosing a cluster name
 
diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc
index 4edf5cfbab5..5709ae3bb93 100644
--- a/docs/reference/setup/important-settings/discovery-settings.asciidoc
+++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc
@@ -58,15 +58,17 @@ cluster.initial_master_nodes:
     `transport.port` if not specified.
 <2> If a hostname resolves to multiple IP addresses then the node will attempt to
     discover other nodes at all resolved addresses.
-<3> Initial master nodes can be identified by their <<node.name,`node.name`>>.
-    Make sure that the value here matches the `node.name` exactly. If you use a
-    fully-qualified domain name such as `master-node-a.example.com` for your
+<3> Initial master nodes can be identified by their <<node.name,`node.name`>>,
+    which defaults to the hostname. Make sure that the value in
+    `cluster.initial_master_nodes` matches the `node.name` exactly. If you use
+    a fully-qualified domain name such as `master-node-a.example.com` for your
     node names then you must use the fully-qualified name in this list;
     conversely if `node.name` is a bare hostname without any trailing
     qualifiers then you must also omit the trailing qualifiers in
     `cluster.initial_master_nodes`.
 <4> Initial master nodes can also be identified by their IP address.
-<5> If multiple master nodes share an IP address then the port must be used to
-    disambiguate them.
+<5> If multiple master nodes share an IP address then the transport port must
+    be used to distinguish between them.
 
-For more information, see <<modules-discovery-settings>>.
+For more information, see <<modules-discovery-bootstrap-cluster>> and
+<<modules-discovery-settings>>.

From 2980a6c70fe4157f6bc11a8b00b6a2a8bb1f0535 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?= <cbuescher@posteo.de>
Date: Mon, 15 Apr 2019 09:41:34 +0200
Subject: [PATCH 016/112] Clarify some ToXContent implementations behaviour
 (#41000)

This change adds either ToXContentObject or ToXContentFragment to classes
that currently implement ToXContent directly. This makes it easier to reason
about whether those implementations output a full xcontent object or just a
fragment.

Relates to #16347
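
For illustration only (this sketch is not part of the change, and the class
name is made up): a ToXContentObject implementation is responsible for opening
and closing its own object, so callers stop wrapping the call in
startObject()/endObject(), whereas a ToXContentFragment only writes bare
fields into an object opened by its caller. Roughly:

    import java.io.IOException;

    import org.elasticsearch.common.xcontent.ToXContentObject;
    import org.elasticsearch.common.xcontent.XContentBuilder;

    // Hypothetical example class, not present in the codebase.
    public class ExampleShardFailure implements ToXContentObject {

        private final String index;
        private final int shardId;

        public ExampleShardFailure(String index, int shardId) {
            this.index = index;
            this.shardId = shardId;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();           // the object opens itself ...
            builder.field("index", index);
            builder.field("shard", shardId);
            builder.endObject();             // ... and closes itself
            return builder;
        }
    }

A ToXContentFragment would instead emit only the field() calls and rely on the
caller to provide the enclosing object, which is why several callers in the
hunks below drop their explicit startObject()/endObject() pairs once the
failure classes become ToXContentObject.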
---
 .../client/graph/GraphExploreResponse.java    |  2 -
 .../indices/PutIndexTemplateRequest.java      | 34 ++++++------
 .../client/ml/FindFileStructureRequest.java   |  4 +-
 .../client/RestHighLevelClientTests.java      | 14 +++--
 .../action/PainlessExecuteAction.java         |  3 +-
 .../action/ShardOperationFailedException.java |  4 +-
 .../storedscripts/PutStoredScriptRequest.java |  4 +-
 .../shards/IndicesShardStoresResponse.java    |  7 ++-
 .../template/put/PutIndexTemplateRequest.java | 55 ++++++++++---------
 .../search/SearchPhaseExecutionException.java |  2 -
 .../action/search/SearchResponse.java         |  4 +-
 .../action/search/ShardSearchFailure.java     | 18 +++---
 .../DefaultShardOperationFailedException.java |  7 +++
 .../routing/allocation/decider/Decision.java  |  6 +-
 .../index/query/IntervalsSourceProvider.java  |  4 +-
 .../index/seqno/RetentionLease.java           |  4 +-
 .../index/seqno/RetentionLeases.java          |  3 +-
 .../rest/action/RestActions.java              |  2 -
 .../elasticsearch/snapshots/SnapshotInfo.java |  4 --
 .../snapshots/SnapshotShardFailure.java       |  2 +
 .../common/xcontent/BaseXContentTestCase.java |  5 +-
 .../xpack/graph/GraphExploreResponse.java     |  2 -
 .../rollup/action/GetRollupCapsAction.java    |  5 +-
 .../action/GetRollupIndexCapsAction.java      |  5 +-
 .../rollup/action/GetRollupJobsAction.java    |  5 +-
 .../rollup/action/StartRollupJobAction.java   |  5 +-
 .../rollup/action/StopRollupJobAction.java    |  5 +-
 .../action/token/InvalidateTokenResponse.java |  3 +-
 .../support/mapper/TemplateRoleName.java      |  4 +-
 .../actions/get/GetWatchResponse.java         |  6 +-
 .../rest/action/RestGetWatchAction.java       |  2 -
 31 files changed, 123 insertions(+), 107 deletions(-)

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java
index dddc4bedfe4..21717312901 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java
@@ -133,9 +133,7 @@ public class GraphExploreResponse implements ToXContentObject {
         builder.startArray(FAILURES.getPreferredName());
         if (shardFailures != null) {
             for (ShardOperationFailedException shardFailure : shardFailures) {
-                builder.startObject();
                 shardFailure.toXContent(builder, params);
-                builder.endObject();
             }
         }
         builder.endArray();
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java
index 5f22691b046..7008a719b7b 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java
@@ -31,7 +31,7 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.DeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -56,7 +56,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 /**
  * A request to create an index template.
  */
-public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContent {
+public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContentFragment {
 
     private String name;
 
@@ -191,7 +191,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
     public Settings settings() {
         return this.settings;
     }
-    
+
     /**
      * Adds mapping that will be added when the index gets created.
      *
@@ -201,7 +201,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
     public PutIndexTemplateRequest mapping(String source, XContentType xContentType) {
         internalMapping(XContentHelper.convertToMap(new BytesArray(source), true, xContentType).v2());
         return this;
-    }    
+    }
 
     /**
      * The cause for this index template creation.
@@ -221,11 +221,11 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
      * @param source The mapping source
      */
     public PutIndexTemplateRequest mapping(XContentBuilder source) {
-        internalMapping(XContentHelper.convertToMap(BytesReference.bytes(source), 
+        internalMapping(XContentHelper.convertToMap(BytesReference.bytes(source),
                 true, source.contentType()).v2());
-        return this;        
-    }    
-    
+        return this;
+    }
+
     /**
      * Adds mapping that will be added when the index gets created.
      *
@@ -235,8 +235,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
     public PutIndexTemplateRequest mapping(BytesReference source, XContentType xContentType) {
         internalMapping(XContentHelper.convertToMap(source, true, xContentType).v2());
         return this;
-    } 
-    
+    }
+
     /**
      * Adds mapping that will be added when the index gets created.
      *
@@ -244,7 +244,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
      */
     public PutIndexTemplateRequest mapping(Map<String, Object> source) {
         return internalMapping(source);
-    }      
+    }
 
     private PutIndexTemplateRequest internalMapping(Map<String, Object> source) {
         try {
@@ -257,12 +257,12 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
                 return this;
             } catch (IOException e) {
                 throw new UncheckedIOException("failed to convert source to json", e);
-            }            
+            }
         } catch (IOException e) {
             throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
         }
-    }   
-    
+    }
+
     public BytesReference mappings() {
         return this.mappings;
     }
@@ -349,8 +349,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
      */
     public PutIndexTemplateRequest source(BytesReference source, XContentType xContentType) {
         return source(XContentHelper.convertToMap(source, true, xContentType).v2());
-    }    
-    
+    }
+
 
     public Set<Alias> aliases() {
         return this.aliases;
@@ -441,7 +441,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
                 builder.copyCurrentStructure(parser);
             }
         }
-        
+
         builder.startObject("aliases");
         for (Alias alias : aliases) {
             alias.toXContent(builder, params);
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java
index 90e0c720e88..adfee92bd61 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java
@@ -25,7 +25,7 @@ import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -34,7 +34,7 @@ import java.util.List;
 import java.util.Objects;
 import java.util.Optional;
 
-public class FindFileStructureRequest implements Validatable, ToXContent {
+public class FindFileStructureRequest implements Validatable, ToXContentFragment {
 
     public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample");
     public static final ParseField TIMEOUT = new ParseField("timeout");
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
index 90d440fe723..ed5d7b66d80 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.client;
 
 import com.fasterxml.jackson.core.JsonParseException;
+
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpResponse;
@@ -61,6 +62,7 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.cbor.CborXContent;
@@ -176,7 +178,7 @@ public class RestHighLevelClientTests extends ESTestCase {
         MainResponse testInfo = new MainResponse("nodeName", new MainResponse.Version("number", "buildFlavor", "buildType", "buildHash",
             "buildDate", true, "luceneVersion", "minimumWireCompatibilityVersion", "minimumIndexCompatibilityVersion"),
             "clusterName", "clusterUuid", "You Know, for Search");
-        mockResponse((builder, params) -> {
+        mockResponse((ToXContentFragment) (builder, params) -> {
             // taken from the server side MainResponse
             builder.field("name", testInfo.getNodeName());
             builder.field("cluster_name", testInfo.getClusterName());
@@ -762,12 +764,12 @@ public class RestHighLevelClientTests extends ESTestCase {
                     Collectors.mapping(Tuple::v2, Collectors.toSet())));
 
         // TODO remove in 8.0 - we will undeprecate indices.get_template because the current getIndexTemplate
-        // impl will replace the existing getTemplate method. 
+        // impl will replace the existing getTemplate method.
         // The above general-purpose code ignores all deprecated methods which in this case leaves `getTemplate`
-        // looking like it doesn't have a valid implementatation when it does. 
+        // looking like it doesn't have a valid implementation when it does.
         apiUnsupported.remove("indices.get_template");
-        
-        
+
+
 
         for (Map.Entry<String, Set<Method>> entry : methods.entrySet()) {
             String apiName = entry.getKey();
@@ -830,7 +832,7 @@ public class RestHighLevelClientTests extends ESTestCase {
             assertThat("the return type for method [" + method + "] is incorrect",
                 method.getReturnType().getSimpleName(), equalTo("boolean"));
         } else {
-            // It's acceptable for 404s to be represented as empty Optionals 
+            // It's acceptable for 404s to be represented as empty Optionals
             if (!method.getReturnType().isAssignableFrom(Optional.class)) {
                 assertThat("the return type for method [" + method + "] is incorrect",
                     method.getReturnType().getSimpleName(), endsWith("Response"));
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java
index 81e3bdcd5c4..e8d93b8ef77 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java
@@ -55,7 +55,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -107,7 +106,7 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
         return new Response();
     }
 
-    public static class Request extends SingleShardRequest<Request> implements ToXContent {
+    public static class Request extends SingleShardRequest<Request> implements ToXContentObject {
 
         private static final ParseField SCRIPT_FIELD = new ParseField("script");
         private static final ParseField CONTEXT_FIELD = new ParseField("context");
diff --git a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java
index 490a1760abe..34a8ccd7ad1 100644
--- a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java
+++ b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java
@@ -21,7 +21,7 @@ package org.elasticsearch.action;
 
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.rest.RestStatus;
 
 import java.util.Objects;
@@ -30,7 +30,7 @@ import java.util.Objects;
  * An exception indicating that a failure occurred performing an operation on the shard.
  *
  */
-public abstract class ShardOperationFailedException implements Streamable, ToXContent {
+public abstract class ShardOperationFailedException implements Streamable, ToXContentObject {
 
     protected String index;
     protected int shardId = -1;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
index e7c5a07f568..1f271b2da5e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
@@ -25,7 +25,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -36,7 +36,7 @@ import java.util.Objects;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> implements ToXContent {
+public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> implements ToXContentFragment {
 
     private String id;
     private String context;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
index ed348539d35..0e2f9f752af 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
@@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.indices.shards;
 
 import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
@@ -267,8 +268,10 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
 
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
             builder.field("node", nodeId());
-            super.toXContent(builder, params);
+            super.innerToXContent(builder, params);
+            builder.endObject();
             return builder;
         }
     }
@@ -361,9 +364,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
         if (failures.size() > 0) {
             builder.startArray(Fields.FAILURES);
             for (Failure failure : failures) {
-                builder.startObject();
                 failure.toXContent(builder, params);
-                builder.endObject();
             }
             builder.endArray();
         }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
index 0ff5785fcd3..5278561feb3 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
@@ -39,7 +39,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.DeprecationHandler;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -60,14 +60,14 @@ import java.util.Set;
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
-import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
 import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 
 /**
  * A request to create an index template.
  */
-public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContent {
+public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContentObject {
 
     private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(PutIndexTemplateRequest.class));
 
@@ -519,32 +519,35 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field("index_patterns", indexPatterns);
-        builder.field("order", order);
-        if (version != null) {
-            builder.field("version", version);
-        }
-
-        builder.startObject("settings");
-        settings.toXContent(builder, params);
-        builder.endObject();
-
-        builder.startObject("mappings");
-        for (Map.Entry<String, String> entry : mappings.entrySet()) {
-            builder.field(entry.getKey());
-            try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
-                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entry.getValue())) {
-                builder.copyCurrentStructure(parser);
+        builder.startObject();
+        {
+            builder.field("index_patterns", indexPatterns);
+            builder.field("order", order);
+            if (version != null) {
+                builder.field("version", version);
             }
+
+            builder.startObject("settings");
+            settings.toXContent(builder, params);
+            builder.endObject();
+
+            builder.startObject("mappings");
+            for (Map.Entry<String, String> entry : mappings.entrySet()) {
+                builder.field(entry.getKey());
+                try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
+                        DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entry.getValue())) {
+                    builder.copyCurrentStructure(parser);
+                }
+            }
+            builder.endObject();
+
+            builder.startObject("aliases");
+            for (Alias alias : aliases) {
+                alias.toXContent(builder, params);
+            }
+            builder.endObject();
         }
         builder.endObject();
-
-        builder.startObject("aliases");
-        for (Alias alias : aliases) {
-            alias.toXContent(builder, params);
-        }
-        builder.endObject();
-
         return builder;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java
index e3247c4f551..8b4187c7cf0 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java
@@ -140,9 +140,7 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
         builder.startArray();
         ShardOperationFailedException[] failures = ExceptionsHelper.groupBy(shardFailures);
         for (ShardOperationFailedException failure : failures) {
-            builder.startObject();
             failure.toXContent(builder, params);
-            builder.endObject();
         }
         builder.endArray();
     }
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
index 6ae5e1a553e..681f13b7a81 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -30,7 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.StatusToXContentObject;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -408,7 +408,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
      * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful
      * and how many of them were skipped.
      */
-    public static class Clusters implements ToXContent, Writeable {
+    public static class Clusters implements ToXContentFragment, Writeable {
 
         public static final Clusters EMPTY = new Clusters(0, 0, 0);
 
diff --git a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
index 451ceda70fd..cfd23e3c773 100644
--- a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
+++ b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java
@@ -118,14 +118,18 @@ public class ShardSearchFailure extends ShardOperationFailedException {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field(SHARD_FIELD, shardId());
-        builder.field(INDEX_FIELD, index());
-        if (shardTarget != null) {
-            builder.field(NODE_FIELD, shardTarget.getNodeId());
-        }
-        builder.field(REASON_FIELD);
         builder.startObject();
-        ElasticsearchException.generateThrowableXContent(builder, params, cause);
+        {
+            builder.field(SHARD_FIELD, shardId());
+            builder.field(INDEX_FIELD, index());
+            if (shardTarget != null) {
+                builder.field(NODE_FIELD, shardTarget.getNodeId());
+            }
+            builder.field(REASON_FIELD);
+            builder.startObject();
+            ElasticsearchException.generateThrowableXContent(builder, params, cause);
+            builder.endObject();
+        }
         builder.endObject();
         return builder;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
index 85d8a2c1a38..7aa7dfb62a6 100644
--- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
+++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
@@ -91,6 +91,13 @@ public class DefaultShardOperationFailedException extends ShardOperationFailedEx
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        innerToXContent(builder, params);
+        builder.endObject();
+        return builder;
+    }
+
+    protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
         builder.field("shard", shardId());
         builder.field("index", index());
         builder.field("status", status.name());
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
index 725df82e515..5311cd9c4a3 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
@@ -24,6 +24,8 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -150,7 +152,7 @@ public abstract class Decision implements ToXContent, Writeable {
     /**
      * Simple class representing a single decision
      */
-    public static class Single extends Decision {
+    public static class Single extends Decision implements ToXContentObject {
         private Type type;
         private String label;
         private String explanation;
@@ -269,7 +271,7 @@ public abstract class Decision implements ToXContent, Writeable {
     /**
      * Simple class representing a list of decisions
      */
-    public static class Multi extends Decision {
+    public static class Multi extends Decision implements ToXContentFragment {
 
         private final List<Decision> decisions = new ArrayList<>();
 
diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
index 8aef53bc20e..6aa8f2d700e 100644
--- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
@@ -31,8 +31,8 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -457,7 +457,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
         }
     }
 
-    public static class IntervalFilter implements ToXContent, Writeable {
+    public static class IntervalFilter implements ToXContentObject, Writeable {
 
         public static final String NAME = "filter";
 
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java
index e6d6ed3fe82..9cfad7c36ea 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java
@@ -24,7 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 
@@ -37,7 +37,7 @@ import java.util.Objects;
  * otherwise merge away operations that have been soft deleted). Each retention lease contains a unique identifier, the retaining sequence
  * number, the timestamp of when the lease was created or renewed, and the source of the retention lease (e.g., "ccr").
  */
-public final class RetentionLease implements ToXContent, Writeable {
+public final class RetentionLease implements ToXContentObject, Writeable {
 
     private final String id;
 
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java
index 3bad8872825..7c3b9e3c7b9 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.gateway.MetaDataStateFormat;
@@ -42,7 +43,7 @@ import java.util.stream.Collectors;
  * Represents a versioned collection of retention leases. We version the collection of retention leases to ensure that sync requests that
  * arrive out of order on the replica, using the version to ensure that older sync requests are rejected.
  */
-public class RetentionLeases implements ToXContent, Writeable {
+public class RetentionLeases implements ToXContentFragment, Writeable {
 
     private final long primaryTerm;
 
diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestActions.java b/server/src/main/java/org/elasticsearch/rest/action/RestActions.java
index f25fd107e51..f6ad30706f7 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/RestActions.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/RestActions.java
@@ -91,9 +91,7 @@ public class RestActions {
         if (shardFailures != null && shardFailures.length > 0) {
             builder.startArray(FAILURES_FIELD.getPreferredName());
             for (ShardOperationFailedException shardFailure : ExceptionsHelper.groupBy(shardFailures)) {
-                builder.startObject();
                 shardFailure.toXContent(builder, params);
-                builder.endObject();
             }
             builder.endArray();
         }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
index cbd4ff659e5..8b907a54e51 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
@@ -514,9 +514,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         if (verbose || !shardFailures.isEmpty()) {
             builder.startArray(FAILURES);
             for (SnapshotShardFailure shardFailure : shardFailures) {
-                builder.startObject();
                 shardFailure.toXContent(builder, params);
-                builder.endObject();
             }
             builder.endArray();
         }
@@ -555,9 +553,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         builder.field(SUCCESSFUL_SHARDS, successfulShards);
         builder.startArray(FAILURES);
         for (SnapshotShardFailure shardFailure : shardFailures) {
-            builder.startObject();
             shardFailure.toXContent(builder, params);
-            builder.endObject();
         }
         builder.endArray();
         builder.endObject();
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java
index a4971609672..10e92b617d3 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java
@@ -187,6 +187,7 @@ public class SnapshotShardFailure extends ShardOperationFailedException {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.field("index", shardId.getIndexName());
         builder.field("index_uuid", shardId.getIndexName());
         builder.field("shard_id", shardId.id());
@@ -195,6 +196,7 @@ public class SnapshotShardFailure extends ShardOperationFailedException {
             builder.field("node_id", nodeId);
         }
         builder.field("status", status.name());
+        builder.endObject();
         return builder;
     }
 
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
index bafe3b7403d..7c5cc2bc802 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
@@ -22,6 +22,7 @@ package org.elasticsearch.common.xcontent;
 import com.fasterxml.jackson.core.JsonGenerationException;
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.core.JsonParseException;
+
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -752,7 +753,7 @@ public abstract class BaseXContentTestCase extends ESTestCase {
                     .field("xcontent", xcontent0)
                 .endObject());
 
-        ToXContent xcontent1 = (builder, params) -> {
+        ToXContentObject xcontent1 = (builder, params) -> {
             builder.startObject();
             builder.field("field", "value");
             builder.startObject("foo");
@@ -762,7 +763,7 @@ public abstract class BaseXContentTestCase extends ESTestCase {
             return builder;
         };
 
-        ToXContent xcontent2 = (builder, params) -> {
+        ToXContentObject xcontent2 = (builder, params) -> {
             builder.startObject();
             builder.field("root", xcontent0);
             builder.array("childs", xcontent0, xcontent1);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java
index b44e192f407..5f6a4b35a9e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java
@@ -168,9 +168,7 @@ public class GraphExploreResponse extends ActionResponse implements ToXContentOb
         builder.startArray(FAILURES.getPreferredName());
         if (shardFailures != null) {
             for (ShardOperationFailedException shardFailure : shardFailures) {
-                builder.startObject();
                 shardFailure.toXContent(builder, params);
-                builder.endObject();
             }
         }
         builder.endArray();
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java
index d28d14a0ac0..f544c21a15c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java
@@ -17,7 +17,8 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContent.Params;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.rollup.RollupField;
@@ -43,7 +44,7 @@ public class GetRollupCapsAction extends Action<GetRollupCapsAction.Response> {
         return new Response();
     }
 
-    public static class Request extends ActionRequest implements ToXContent {
+    public static class Request extends ActionRequest implements ToXContentFragment {
         private String indexPattern;
 
         public Request(String indexPattern) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java
index 4f95919c498..9dcd673c39f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java
@@ -19,7 +19,8 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContent.Params;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.rollup.RollupField;
@@ -47,7 +48,7 @@ public class GetRollupIndexCapsAction extends Action<GetRollupIndexCapsAction.Re
         return new Response();
     }
 
-    public static class Request extends ActionRequest implements IndicesRequest.Replaceable, ToXContent {
+    public static class Request extends ActionRequest implements IndicesRequest.Replaceable, ToXContentFragment {
         private String[] indices;
         private IndicesOptions options;
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
index 4b33e018826..913e544e741 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
@@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.Task;
@@ -58,7 +57,7 @@ public class GetRollupJobsAction extends Action<GetRollupJobsAction.Response> {
         return Response::new;
     }
 
-    public static class Request extends BaseTasksRequest<Request> implements ToXContent {
+    public static class Request extends BaseTasksRequest<Request> implements ToXContentObject {
         private String id;
 
         public Request(String id) {
@@ -107,7 +106,9 @@ public class GetRollupJobsAction extends Action<GetRollupJobsAction.Response> {
 
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
             builder.field(RollupField.ID.getPreferredName(), id);
+            builder.endObject();
             return builder;
         }
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java
index ff803b13628..ca2a5cd8d72 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java
@@ -15,7 +15,6 @@ import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
@@ -44,7 +43,7 @@ public class StartRollupJobAction extends Action<StartRollupJobAction.Response>
         return Response::new;
     }
 
-    public static class Request extends BaseTasksRequest<Request> implements ToXContent {
+    public static class Request extends BaseTasksRequest<Request> implements ToXContentObject {
         private String id;
 
         public Request(String id) {
@@ -75,7 +74,9 @@ public class StartRollupJobAction extends Action<StartRollupJobAction.Response>
 
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
             builder.field(RollupField.ID.getPreferredName(), id);
+            builder.endObject();
             return builder;
         }
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java
index dadc54726b5..6fc079e0328 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java
@@ -18,7 +18,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
@@ -51,7 +50,7 @@ public class StopRollupJobAction extends Action<StopRollupJobAction.Response> {
         return Response::new;
     }
 
-    public static class Request extends BaseTasksRequest<Request> implements ToXContent {
+    public static class Request extends BaseTasksRequest<Request> implements ToXContentObject {
         private String id;
         private boolean waitForCompletion = false;
         private TimeValue timeout = null;
@@ -106,11 +105,13 @@ public class StopRollupJobAction extends Action<StopRollupJobAction.Response> {
 
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
             builder.field(RollupField.ID.getPreferredName(), id);
             builder.field(WAIT_FOR_COMPLETION.getPreferredName(), waitForCompletion);
             if (timeout != null) {
                 builder.field(TIMEOUT.getPreferredName(), timeout);
             }
+            builder.endObject();
             return builder;
         }
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java
index 9f11c48c96a..7e25683a2a9 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java
@@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.security.authc.support.TokensInvalidationResult;
 
@@ -18,7 +19,7 @@ import java.util.Objects;
 /**
  * Response for a invalidation of one or multiple tokens.
  */
-public final class InvalidateTokenResponse extends ActionResponse implements ToXContent {
+public final class InvalidateTokenResponse extends ActionResponse implements ToXContentObject {
 
     private TokensInvalidationResult result;
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java
index d77882d6454..59f9eafec1c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java
@@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ObjectParser;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -44,7 +44,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
 /**
  * Representation of a Mustache template for expressing one or more roles names in a {@link ExpressionRoleMapping}.
  */
-public class TemplateRoleName implements ToXContent, Writeable {
+public class TemplateRoleName implements ToXContentObject, Writeable {
 
     private static final ConstructingObjectParser<TemplateRoleName, Void> PARSER = new ConstructingObjectParser<>(
         "role-mapping-template", false, arr -> new TemplateRoleName((BytesReference) arr[0], (Format) arr[1]));
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java
index 18ec33f5dfb..fe79fdc33f9 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java
@@ -10,7 +10,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.uid.Versions;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
@@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.watcher.watch.WatchStatus;
 import java.io.IOException;
 import java.util.Objects;
 
-public class GetWatchResponse extends ActionResponse implements ToXContent {
+public class GetWatchResponse extends ActionResponse implements ToXContentObject {
 
     private String id;
     private WatchStatus status;
@@ -122,6 +122,7 @@ public class GetWatchResponse extends ActionResponse implements ToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.field("found", found);
         builder.field("_id", id);
         if (found) {
@@ -131,6 +132,7 @@ public class GetWatchResponse extends ActionResponse implements ToXContent {
             builder.field("status", status,  params);
             builder.field("watch", source, params);
         }
+        builder.endObject();
         return builder;
     }
 
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java
index 15cb9612445..0d9b1ee6a19 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java
@@ -48,9 +48,7 @@ public class RestGetWatchAction extends WatcherRestHandler {
         return channel -> client.getWatch(getWatchRequest, new RestBuilderListener<GetWatchResponse>(channel) {
             @Override
             public RestResponse buildResponse(GetWatchResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
                 response.toXContent(builder, request);
-                builder.endObject();
                 RestStatus status = response.isFound() ? OK : NOT_FOUND;
                 return new BytesRestResponse(status, builder);
             }

From fe9442b05b453de33f3054e80c5dd043036216ff Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas <ikakavas@protonmail.com>
Date: Mon, 15 Apr 2019 12:41:16 +0300
Subject: [PATCH 017/112] Add an OpenID Connect authentication realm (#40674)
 (#41178)

This commit adds an OpenID Connect authentication realm to
Elasticsearch. Elasticsearch (with the assistance of Kibana or
another web component) acts as an OpenID Connect Relying
Party and supports the Authorization Code Grant and Implicit
flows as described in http://ela.st/oidc-spec. It adds support
for consuming and verifying signed ID Tokens, for both
RP-initiated and third-party-initiated Single Sign-On, and for
RP-initiated single logout.
It also adds an OpenID Connect Provider to the idp-fixture, to
be used by the associated integration tests.

This is a backport of #40674
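
As a rough sketch of how such a realm could be configured in
elasticsearch.yml (illustration only: the realm name, URLs and values below
are placeholders, and the setting keys are assumptions based on the realm's
xpack.security.authc.realms.oidc.* namespace; see the OpenID Connect realm
reference documentation for the authoritative list):

    # Hypothetical realm named "oidc1"; all values are placeholders.
    xpack.security.authc.realms.oidc.oidc1:
      order: 2
      rp.client_id: "kibana"
      rp.response_type: "code"
      rp.redirect_uri: "https://kibana.example.com/api/security/v1/oidc"
      op.issuer: "https://op.example.com"
      op.authorization_endpoint: "https://op.example.com/oauth2/authorize"
      op.token_endpoint: "https://op.example.com/oauth2/token"
      op.jwkset_path: "oidc/jwkset.json"
      claims.principal: sub
    # The client secret is a secure setting and would be added to the
    # keystore rather than elasticsearch.yml, e.g.
    # xpack.security.authc.realms.oidc.oidc1.rp.client_secret.

Roughly speaking, the web component drives the Authorization Code flow
against the OP and hands the returned authorization code back to the new
authenticate action (OpenIdConnectAuthenticateAction) so that the realm can
exchange it and validate the resulting ID Token.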
---
 .../oidc/OpenIdConnectAuthenticateAction.java |  32 +
 .../OpenIdConnectAuthenticateRequest.java     | 108 +++
 ...enIdConnectAuthenticateRequestBuilder.java |  36 +
 .../OpenIdConnectAuthenticateResponse.java    |  65 ++
 .../oidc/OpenIdConnectLogoutAction.java       |  29 +
 .../oidc/OpenIdConnectLogoutRequest.java      |  71 ++
 .../oidc/OpenIdConnectLogoutResponse.java     |  45 +
 ...nIdConnectPrepareAuthenticationAction.java |  29 +
 ...IdConnectPrepareAuthenticationRequest.java | 121 +++
 ...ctPrepareAuthenticationRequestBuilder.java |  25 +
 ...dConnectPrepareAuthenticationResponse.java |  83 ++
 .../authc/InternalRealmsSettings.java         |   2 +
 .../core/security/authc/RealmSettings.java    |  13 +
 .../oidc/OpenIdConnectRealmSettings.java      | 208 +++++
 .../authz/privilege/ClusterPrivilege.java     |   3 +
 x-pack/plugin/security/build.gradle           |  24 +-
 .../security/forbidden/oidc-signatures.txt    |   3 +
 .../licenses/accessors-smart-1.2.jar.sha1     |   1 +
 .../licenses/accessors-smart-LICENSE.txt      | 202 +++++
 .../licenses/accessors-smart-NOTICE.txt       |   0
 .../plugin/security/licenses/asm-7.1.jar.sha1 |   1 +
 .../plugin/security/licenses/asm-LICENSE.txt  |  26 +
 .../plugin/security/licenses/asm-NOTICE.txt   |   1 +
 .../licenses/jakarta.mail-1.6.3.jar.sha1      |   1 +
 .../licenses/jakarta.mail-LICENSE.txt         | 637 ++++++++++++++
 .../security/licenses/jakarta.mail-NOTICE.txt |  50 ++
 .../licenses/jcip-annotations-1.0.jar.sha1    |   1 +
 .../licenses/jcip-annotations-LICENSE.txt     | 202 +++++
 .../licenses/jcip-annotations-NOTICE.txt      |   0
 .../security/licenses/json-smart-2.3.jar.sha1 |   1 +
 .../security/licenses/json-smart-LICENSE.txt  | 202 +++++
 .../security/licenses/json-smart-NOTICE.txt   |   0
 .../security/licenses/lang-tag-1.4.4.jar.sha1 |   1 +
 .../security/licenses/lang-tag-LICENSE.txt    | 202 +++++
 .../security/licenses/lang-tag-NOTICE.txt     |  14 +
 .../licenses/nimbus-jose-jwt-4.41.2.jar.sha1  |   1 +
 .../licenses/nimbus-jose-jwt-LICENSE.txt      | 202 +++++
 .../licenses/nimbus-jose-jwt-NOTICE.txt       |  14 +
 .../licenses/oauth2-oidc-sdk-6.5.jar.sha1     |   1 +
 .../licenses/oauth2-oidc-sdk-LICENSE.txt      | 202 +++++
 .../licenses/oauth2-oidc-sdk-NOTICE.txt       |  14 +
 .../xpack/security/Security.java              |  16 +
 ...nsportOpenIdConnectAuthenticateAction.java |  83 ++
 .../TransportOpenIdConnectLogoutAction.java   | 135 +++
 ...nIdConnectPrepareAuthenticationAction.java |  82 ++
 .../xpack/security/authc/InternalRealms.java  |  17 +-
 .../oidc/OpenIdConnectAuthenticator.java      | 722 ++++++++++++++++
 .../OpenIdConnectProviderConfiguration.java   |  64 ++
 .../authc/oidc/OpenIdConnectRealm.java        | 473 ++++++++++
 .../authc/oidc/OpenIdConnectToken.java        |  68 ++
 .../authc/oidc/RelyingPartyConfiguration.java |  68 ++
 .../oidc/OpenIdConnectBaseRestHandler.java    |  40 +
 .../RestOpenIdConnectAuthenticateAction.java  |  74 ++
 .../oidc/RestOpenIdConnectLogoutAction.java   |  69 ++
 ...nIdConnectPrepareAuthenticationAction.java |  71 ++
 ...OpenIdConnectAuthenticateRequestTests.java |  55 ++
 ...nectPrepareAuthenticationRequestTests.java |  73 ++
 ...ansportOpenIdConnectLogoutActionTests.java | 230 +++++
 .../security/authc/InternalRealmsTests.java   |   3 +-
 .../authc/SecurityRealmSettingsTests.java     |  16 +-
 .../oidc/OpenIdConnectAuthenticatorTests.java | 808 ++++++++++++++++++
 .../oidc/OpenIdConnectRealmSettingsTests.java | 256 ++++++
 .../authc/oidc/OpenIdConnectRealmTests.java   | 341 ++++++++
 .../authc/oidc/OpenIdConnectTestCase.java     | 112 +++
 x-pack/qa/oidc-op-tests/build.gradle          |  84 ++
 .../authc/oidc/OpenIdConnectAuthIT.java       | 394 +++++++++
 x-pack/test/idp-fixture/docker-compose.yml    |   7 +
 x-pack/test/idp-fixture/oidc/op-jwks.json     |   1 +
 .../test/idp-fixture/oidc/override.properties |   4 +
 69 files changed, 7229 insertions(+), 10 deletions(-)
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java
 create mode 100644 x-pack/plugin/security/forbidden/oidc-signatures.txt
 create mode 100644 x-pack/plugin/security/licenses/accessors-smart-1.2.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/accessors-smart-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/accessors-smart-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/asm-7.1.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/asm-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/asm-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/jakarta.mail-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/jakarta.mail-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/jcip-annotations-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/jcip-annotations-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/json-smart-2.3.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/json-smart-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/json-smart-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/lang-tag-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/lang-tag-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/nimbus-jose-jwt-4.41.2.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt
 create mode 100644 x-pack/plugin/security/licenses/oauth2-oidc-sdk-6.5.jar.sha1
 create mode 100644 x-pack/plugin/security/licenses/oauth2-oidc-sdk-LICENSE.txt
 create mode 100644 x-pack/plugin/security/licenses/oauth2-oidc-sdk-NOTICE.txt
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutAction.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectToken.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/RelyingPartyConfiguration.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/OpenIdConnectBaseRestHandler.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectAuthenticateAction.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectLogoutAction.java
 create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectPrepareAuthenticationAction.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectAuthenticateRequestTests.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestTests.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java
 create mode 100644 x-pack/qa/oidc-op-tests/build.gradle
 create mode 100644 x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java
 create mode 100644 x-pack/test/idp-fixture/oidc/op-jwks.json
 create mode 100644 x-pack/test/idp-fixture/oidc/override.properties

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java
new file mode 100644
index 00000000000..b27a71e202e
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateAction.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.common.io.stream.Writeable;
+
+/**
+ * Action for initiating an authentication process using OpenID Connect
+ */
+public final class OpenIdConnectAuthenticateAction extends Action<OpenIdConnectAuthenticateResponse> {
+
+    public static final OpenIdConnectAuthenticateAction INSTANCE = new OpenIdConnectAuthenticateAction();
+    public static final String NAME = "cluster:admin/xpack/security/oidc/authenticate";
+
+    private OpenIdConnectAuthenticateAction() {
+        super(NAME);
+    }
+
+    @Override
+    public OpenIdConnectAuthenticateResponse newResponse() {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public Writeable.Reader<OpenIdConnectAuthenticateResponse> getResponseReader() {
+        return OpenIdConnectAuthenticateResponse::new;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java
new file mode 100644
index 00000000000..1e27e02e607
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Represents a request for authentication using OpenID Connect
+ */
+public class OpenIdConnectAuthenticateRequest extends ActionRequest {
+
+    /**
+     * The URI where the OP redirected the browser after the authentication attempt. This is passed as is from the
+     * facilitator entity (i.e. Kibana)
+     */
+    private String redirectUri;
+
+    /**
+     * The state value that we generated or the facilitator provided for this specific flow and that should be stored in the user's session
+     * with the facilitator
+     */
+    private String state;
+
+    /**
+     * The nonce value that we generated or the facilitator provided for this specific flow and that should be stored in the user's session
+     * with the facilitator
+     */
+    private String nonce;
+
+    public OpenIdConnectAuthenticateRequest() {
+
+    }
+
+    public OpenIdConnectAuthenticateRequest(StreamInput in) throws IOException {
+        super.readFrom(in);
+        redirectUri = in.readString();
+        state = in.readString();
+        nonce = in.readString();
+    }
+
+    public String getRedirectUri() {
+        return redirectUri;
+    }
+
+    public void setRedirectUri(String redirectUri) {
+        this.redirectUri = redirectUri;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public void setState(String state) {
+        this.state = state;
+    }
+
+    public String getNonce() {
+        return nonce;
+    }
+
+    public void setNonce(String nonce) {
+        this.nonce = nonce;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (Strings.isNullOrEmpty(state)) {
+            validationException = addValidationError("state parameter is missing", validationException);
+        }
+        if (Strings.isNullOrEmpty(nonce)) {
+            validationException = addValidationError("nonce parameter is missing", validationException);
+        }
+        if (Strings.isNullOrEmpty(redirectUri)) {
+            validationException = addValidationError("redirect_uri parameter is missing", validationException);
+        }
+        return validationException;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(redirectUri);
+        out.writeString(state);
+        out.writeString(nonce);
+    }
+
+    @Override
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    public String toString() {
+        return "{redirectUri=" + redirectUri + ", state=" + state + ", nonce=" + nonce + "}";
+    }
+}
+
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java
new file mode 100644
index 00000000000..cbdd13aec04
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+
+/**
+ * Request builder for populating a {@link OpenIdConnectAuthenticateRequest}
+ */
+public class OpenIdConnectAuthenticateRequestBuilder
+    extends ActionRequestBuilder<OpenIdConnectAuthenticateRequest, OpenIdConnectAuthenticateResponse> {
+
+    public OpenIdConnectAuthenticateRequestBuilder(ElasticsearchClient client) {
+        super(client, OpenIdConnectAuthenticateAction.INSTANCE, new OpenIdConnectAuthenticateRequest());
+    }
+
+    public OpenIdConnectAuthenticateRequestBuilder redirectUri(String redirectUri) {
+        request.setRedirectUri(redirectUri);
+        return this;
+    }
+
+    public OpenIdConnectAuthenticateRequestBuilder state(String state) {
+        request.setState(state);
+        return this;
+    }
+
+    public OpenIdConnectAuthenticateRequestBuilder nonce(String nonce) {
+        request.setNonce(nonce);
+        return this;
+    }
+
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java
new file mode 100644
index 00000000000..93b7c6b292a
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.io.IOException;
+
+public class OpenIdConnectAuthenticateResponse extends ActionResponse {
+    private String principal;
+    private String accessTokenString;
+    private String refreshTokenString;
+    private TimeValue expiresIn;
+
+    public OpenIdConnectAuthenticateResponse(String principal, String accessTokenString, String refreshTokenString, TimeValue expiresIn) {
+        this.principal = principal;
+        this.accessTokenString = accessTokenString;
+        this.refreshTokenString = refreshTokenString;
+        this.expiresIn = expiresIn;
+    }
+
+    public OpenIdConnectAuthenticateResponse(StreamInput in) throws IOException {
+        super.readFrom(in);
+        principal = in.readString();
+        accessTokenString = in.readString();
+        refreshTokenString = in.readString();
+        expiresIn = in.readTimeValue();
+    }
+
+    public String getPrincipal() {
+        return principal;
+    }
+
+    public String getAccessTokenString() {
+        return accessTokenString;
+    }
+
+    public String getRefreshTokenString() {
+        return refreshTokenString;
+    }
+
+    public TimeValue getExpiresIn() {
+        return expiresIn;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(principal);
+        out.writeString(accessTokenString);
+        out.writeString(refreshTokenString);
+        out.writeTimeValue(expiresIn);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java
new file mode 100644
index 00000000000..482484a7ded
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutAction.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.common.io.stream.Writeable;
+
+public class OpenIdConnectLogoutAction extends Action<OpenIdConnectLogoutResponse> {
+
+    public static final OpenIdConnectLogoutAction INSTANCE = new OpenIdConnectLogoutAction();
+    public static final String NAME = "cluster:admin/xpack/security/oidc/logout";
+
+    private OpenIdConnectLogoutAction() {
+        super(NAME);
+    }
+
+    @Override
+    public OpenIdConnectLogoutResponse newResponse() {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public Writeable.Reader<OpenIdConnectLogoutResponse> getResponseReader() {
+        return OpenIdConnectLogoutResponse::new;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java
new file mode 100644
index 00000000000..777df403eca
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutRequest.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+public final class OpenIdConnectLogoutRequest extends ActionRequest {
+
+    private String token;
+    @Nullable
+    private String refreshToken;
+
+    public OpenIdConnectLogoutRequest() {
+
+    }
+
+    public OpenIdConnectLogoutRequest(StreamInput in) throws IOException {
+        super.readFrom(in);
+        token = in.readString();
+        refreshToken = in.readOptionalString();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (Strings.isNullOrEmpty(token)) {
+            validationException = addValidationError("token is missing", validationException);
+        }
+        return validationException;
+    }
+
+    public String getToken() {
+        return token;
+    }
+
+    public void setToken(String token) {
+        this.token = token;
+    }
+
+    public String getRefreshToken() {
+        return refreshToken;
+    }
+
+    public void setRefreshToken(String refreshToken) {
+        this.refreshToken = refreshToken;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(token);
+        out.writeOptionalString(refreshToken);
+    }
+
+    @Override
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java
new file mode 100644
index 00000000000..e725701e01c
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectLogoutResponse.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+public final class OpenIdConnectLogoutResponse extends ActionResponse {
+
+    private String endSessionUrl;
+
+    public OpenIdConnectLogoutResponse(StreamInput in) throws IOException {
+        super.readFrom(in);
+        this.endSessionUrl = in.readString();
+    }
+
+    public OpenIdConnectLogoutResponse(String endSessionUrl) {
+        this.endSessionUrl = endSessionUrl;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(endSessionUrl);
+    }
+
+    public String toString() {
+        return "{endSessionUrl=" + endSessionUrl + "}";
+    }
+
+    public String getEndSessionUrl() {
+        return endSessionUrl;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java
new file mode 100644
index 00000000000..2aa82c7286c
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationAction.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.common.io.stream.Writeable;
+
+public class OpenIdConnectPrepareAuthenticationAction extends Action<OpenIdConnectPrepareAuthenticationResponse> {
+
+    public static final OpenIdConnectPrepareAuthenticationAction INSTANCE = new OpenIdConnectPrepareAuthenticationAction();
+    public static final String NAME = "cluster:admin/xpack/security/oidc/prepare";
+
+    private OpenIdConnectPrepareAuthenticationAction() {
+        super(NAME);
+    }
+
+    @Override
+    public OpenIdConnectPrepareAuthenticationResponse newResponse() {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public Writeable.Reader<OpenIdConnectPrepareAuthenticationResponse> getResponseReader() {
+        return OpenIdConnectPrepareAuthenticationResponse::new;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java
new file mode 100644
index 00000000000..8f6d616981b
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequest.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Represents a request to prepare an OAuth 2.0 authorization request
+ */
+public class OpenIdConnectPrepareAuthenticationRequest extends ActionRequest {
+
+    /**
+     * The name of the OpenID Connect realm in the configuration that should be used for authentication
+     */
+    private String realmName;
+    /**
+     * In case of a
+     * <a href="https://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin">3rd party initiated authentication</a>, the
+     * issuer that the User Agent needs to be redirected to for authentication
+     */
+    private String issuer;
+    private String loginHint;
+    private String state;
+    private String nonce;
+
+    public String getRealmName() {
+        return realmName;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getNonce() {
+        return nonce;
+    }
+
+    public String getIssuer() {
+        return issuer;
+    }
+
+    public String getLoginHint() {
+        return loginHint;
+    }
+
+    public void setRealmName(String realmName) {
+        this.realmName = realmName;
+    }
+
+    public void setIssuer(String issuer) {
+        this.issuer = issuer;
+    }
+
+    public void setState(String state) {
+        this.state = state;
+    }
+
+    public void setNonce(String nonce) {
+        this.nonce = nonce;
+    }
+
+    public void setLoginHint(String loginHint) {
+        this.loginHint = loginHint;
+    }
+
+    public OpenIdConnectPrepareAuthenticationRequest() {
+    }
+
+    public OpenIdConnectPrepareAuthenticationRequest(StreamInput in) throws IOException {
+        super.readFrom(in);
+        realmName = in.readOptionalString();
+        issuer = in.readOptionalString();
+        loginHint = in.readOptionalString();
+        state = in.readOptionalString();
+        nonce = in.readOptionalString();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (Strings.hasText(realmName) == false && Strings.hasText(issuer) == false) {
+            validationException = addValidationError("one of [realm, issuer] must be provided", null);
+        }
+        if (Strings.hasText(realmName) && Strings.hasText(issuer)) {
+            validationException = addValidationError("only one of [realm, issuer] can be provided in the same request", null);
+        }
+        return validationException;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalString(realmName);
+        out.writeOptionalString(issuer);
+        out.writeOptionalString(loginHint);
+        out.writeOptionalString(state);
+        out.writeOptionalString(nonce);
+    }
+
+    @Override
+    public void readFrom(StreamInput in) {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    public String toString() {
+        return "{realmName=" + realmName + ", issuer=" + issuer + ", login_hint=" +
+            loginHint + ", state=" + state + ", nonce=" + nonce + "}";
+    }
+
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java
new file mode 100644
index 00000000000..b7992345a10
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+
+/**
+ * Request builder for populating a {@link OpenIdConnectPrepareAuthenticationRequest}
+ */
+public class OpenIdConnectPrepareAuthenticationRequestBuilder
+    extends ActionRequestBuilder<OpenIdConnectPrepareAuthenticationRequest, OpenIdConnectPrepareAuthenticationResponse> {
+
+    public OpenIdConnectPrepareAuthenticationRequestBuilder(ElasticsearchClient client) {
+        super(client, OpenIdConnectPrepareAuthenticationAction.INSTANCE, new OpenIdConnectPrepareAuthenticationRequest());
+    }
+
+    public OpenIdConnectPrepareAuthenticationRequestBuilder realmName(String name) {
+        request.setRealmName(name);
+        return this;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java
new file mode 100644
index 00000000000..c8a70e65b81
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.action.oidc;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * A response object that contains the OpenID Connect Authentication Request as a URL and the state and nonce values that were
+ * generated for this request.
+ */
+public class OpenIdConnectPrepareAuthenticationResponse extends ActionResponse implements ToXContentObject {
+
+    private String authenticationRequestUrl;
+    /*
+     * The oAuth2 state parameter used for CSRF protection.
+     */
+    private String state;
+    /*
+     * String value used to associate a Client session with an ID Token, and to mitigate replay attacks.
+     */
+    private String nonce;
+
+    public OpenIdConnectPrepareAuthenticationResponse(String authorizationEndpointUrl, String state, String nonce) {
+        this.authenticationRequestUrl = authorizationEndpointUrl;
+        this.state = state;
+        this.nonce = nonce;
+    }
+
+    public OpenIdConnectPrepareAuthenticationResponse(StreamInput in) throws IOException {
+        super.readFrom(in);
+        authenticationRequestUrl = in.readString();
+        state = in.readString();
+        nonce = in.readString();
+    }
+
+    public String getAuthenticationRequestUrl() {
+        return authenticationRequestUrl;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getNonce() {
+        return nonce;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(authenticationRequestUrl);
+        out.writeString(state);
+        out.writeString(nonce);
+    }
+
+    public String toString() {
+        return "{authenticationRequestUrl=" + authenticationRequestUrl + ", state=" + state + ", nonce=" + nonce + "}";
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field("redirect", authenticationRequestUrl);
+        builder.field("state", state);
+        builder.field("nonce", nonce);
+        builder.endObject();
+        return builder;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java
index 8b2ef184068..dd4a8433452 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java
@@ -10,6 +10,7 @@ import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
 
@@ -34,6 +35,7 @@ public final class InternalRealmsSettings {
         set.addAll(PkiRealmSettings.getSettings());
         set.addAll(SamlRealmSettings.getSettings());
         set.addAll(KerberosRealmSettings.getSettings());
+        set.addAll(OpenIdConnectRealmSettings.getSettings());
         return Collections.unmodifiableSet(set);
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java
index 913fcba3d33..0c35525f1de 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java
@@ -6,6 +6,8 @@
 package org.elasticsearch.xpack.core.security.authc;
 
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.SecureSetting;
+import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 
@@ -55,6 +57,17 @@ public class RealmSettings {
         return Setting.affixKeySetting(realmSettingPrefix(realmType), suffix, key -> Setting.simpleString(key, properties));
     }
 
+    /**
+     * Create a {@link SecureSetting#secureString secure string} {@link Setting} object for a realm
+     * with the provided type and setting suffix.
+     *
+     * @param realmType The type of the realm, used within the setting prefix
+     * @param suffix    The suffix of the setting (everything following the realm name in the affix setting)
+     */
+    public static Setting.AffixSetting<SecureString> secureString(String realmType, String suffix) {
+        return Setting.affixKeySetting(realmSettingPrefix(realmType), suffix, key -> SecureSetting.secureString(key, null));
+    }
+
     /**
      * Create a {@link Function} that acts as a factory an {@link org.elasticsearch.common.settings.Setting.AffixSetting}.
      * The {@code Function} takes the <em>realm-type</em> as an argument.
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java
new file mode 100644
index 00000000000..b88056a4f24
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.authc.oidc;
+
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmSettings;
+import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings;
+import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
+
+
+public class OpenIdConnectRealmSettings {
+
+    private OpenIdConnectRealmSettings() {
+    }
+
+    private static final List<String> SUPPORTED_SIGNATURE_ALGORITHMS = Collections.unmodifiableList(
+        Arrays.asList("HS256", "HS384", "HS512", "RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "PS256", "PS384", "PS512"));
+    private static final List<String> RESPONSE_TYPES = Arrays.asList("code", "id_token", "id_token token");
+    public static final String TYPE = "oidc";
+
+    public static final Setting.AffixSetting<String> RP_CLIENT_ID
+        = RealmSettings.simpleString(TYPE, "rp.client_id", Setting.Property.NodeScope);
+    public static final Setting.AffixSetting<SecureString> RP_CLIENT_SECRET
+        = RealmSettings.secureString(TYPE, "rp.client_secret");
+    public static final Setting.AffixSetting<String> RP_REDIRECT_URI
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "rp.redirect_uri",
+        key -> Setting.simpleString(key, v -> {
+            try {
+                new URI(v);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException("Invalid value [" + v + "] for [" + key + "]. Not a valid URI.", e);
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> RP_POST_LOGOUT_REDIRECT_URI
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "rp.post_logout_redirect_uri",
+        key -> Setting.simpleString(key, v -> {
+            try {
+                new URI(v);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException("Invalid value [" + v + "] for [" + key + "]. Not a valid URI.", e);
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> RP_RESPONSE_TYPE
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "rp.response_type",
+        key -> Setting.simpleString(key, v -> {
+            if (RESPONSE_TYPES.contains(v) == false) {
+                throw new IllegalArgumentException(
+                    "Invalid value [" + v + "] for [" + key + "]. Allowed values are " + RESPONSE_TYPES + "");
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> RP_SIGNATURE_ALGORITHM
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "rp.signature_algorithm",
+        key -> new Setting<>(key, "RS256", Function.identity(), v -> {
+            if (SUPPORTED_SIGNATURE_ALGORITHMS.contains(v) == false) {
+                throw new IllegalArgumentException(
+                    "Invalid value [" + v + "] for [" + key + "]. Allowed values are " + SUPPORTED_SIGNATURE_ALGORITHMS + "}]");
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<List<String>> RP_REQUESTED_SCOPES = Setting.affixKeySetting(
+        RealmSettings.realmSettingPrefix(TYPE), "rp.requested_scopes",
+        key -> Setting.listSetting(key, Collections.singletonList("openid"), Function.identity(), Setting.Property.NodeScope));
+
+    public static final Setting.AffixSetting<String> OP_NAME
+        = RealmSettings.simpleString(TYPE, "op.name", Setting.Property.NodeScope);
+    public static final Setting.AffixSetting<String> OP_AUTHORIZATION_ENDPOINT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "op.authorization_endpoint",
+        key -> Setting.simpleString(key, v -> {
+            try {
+                new URI(v);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException("Invalid value [" + v + "] for [" + key + "]. Not a valid URI.", e);
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> OP_TOKEN_ENDPOINT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "op.token_endpoint",
+        key -> Setting.simpleString(key, v -> {
+            try {
+                new URI(v);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException("Invalid value [" + v + "] for [" + key + "]. Not a valid URI.", e);
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> OP_USERINFO_ENDPOINT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "op.userinfo_endpoint",
+        key -> Setting.simpleString(key, v -> {
+            try {
+                new URI(v);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException("Invalid value [" + v + "] for [" + key + "]. Not a valid URI.", e);
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> OP_ENDSESSION_ENDPOINT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "op.endsession_endpoint",
+        key -> Setting.simpleString(key, v -> {
+            try {
+                new URI(v);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException("Invalid value [" + v + "] for [" + key + "]. Not a valid URI.", e);
+            }
+        }, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<String> OP_ISSUER
+        = RealmSettings.simpleString(TYPE, "op.issuer", Setting.Property.NodeScope);
+    public static final Setting.AffixSetting<String> OP_JWKSET_PATH
+        = RealmSettings.simpleString(TYPE, "op.jwkset_path", Setting.Property.NodeScope);
+
+    public static final Setting.AffixSetting<TimeValue> ALLOWED_CLOCK_SKEW
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "allowed_clock_skew",
+        key -> Setting.timeSetting(key, TimeValue.timeValueSeconds(60), Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<Boolean> POPULATE_USER_METADATA = Setting.affixKeySetting(
+        RealmSettings.realmSettingPrefix(TYPE), "populate_user_metadata",
+        key -> Setting.boolSetting(key, true, Setting.Property.NodeScope));
+    private static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(5);
+    public static final Setting.AffixSetting<TimeValue> HTTP_CONNECT_TIMEOUT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "http.connect_timeout",
+        key -> Setting.timeSetting(key, DEFAULT_TIMEOUT, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<TimeValue> HTTP_CONNECTION_READ_TIMEOUT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "http.connection_read_timeout",
+        key -> Setting.timeSetting(key, DEFAULT_TIMEOUT, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<TimeValue> HTTP_SOCKET_TIMEOUT
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "http.socket_timeout",
+        key -> Setting.timeSetting(key, DEFAULT_TIMEOUT, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<Integer> HTTP_MAX_CONNECTIONS
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "http.max_connections",
+        key -> Setting.intSetting(key, 200, Setting.Property.NodeScope));
+    public static final Setting.AffixSetting<Integer> HTTP_MAX_ENDPOINT_CONNECTIONS
+        = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "http.max_endpoint_connections",
+        key -> Setting.intSetting(key, 200, Setting.Property.NodeScope));
+
+    public static final ClaimSetting PRINCIPAL_CLAIM = new ClaimSetting("principal");
+    public static final ClaimSetting GROUPS_CLAIM = new ClaimSetting("groups");
+    public static final ClaimSetting NAME_CLAIM = new ClaimSetting("name");
+    public static final ClaimSetting DN_CLAIM = new ClaimSetting("dn");
+    public static final ClaimSetting MAIL_CLAIM = new ClaimSetting("mail");
+
+    public static Set<Setting.AffixSetting<?>> getSettings() {
+        final Set<Setting.AffixSetting<?>> set = Sets.newHashSet(
+            RP_CLIENT_ID, RP_REDIRECT_URI, RP_RESPONSE_TYPE, RP_REQUESTED_SCOPES, RP_CLIENT_SECRET, RP_SIGNATURE_ALGORITHM,
+            RP_POST_LOGOUT_REDIRECT_URI, OP_NAME, OP_AUTHORIZATION_ENDPOINT, OP_TOKEN_ENDPOINT, OP_USERINFO_ENDPOINT,
+            OP_ENDSESSION_ENDPOINT, OP_ISSUER, OP_JWKSET_PATH, HTTP_CONNECT_TIMEOUT, HTTP_CONNECTION_READ_TIMEOUT, HTTP_SOCKET_TIMEOUT,
+            HTTP_MAX_CONNECTIONS, HTTP_MAX_ENDPOINT_CONNECTIONS, ALLOWED_CLOCK_SKEW);
+        set.addAll(DelegatedAuthorizationSettings.getSettings(TYPE));
+        set.addAll(RealmSettings.getStandardSettings(TYPE));
+        set.addAll(SSLConfigurationSettings.getRealmSettings(TYPE));
+        set.addAll(PRINCIPAL_CLAIM.settings());
+        set.addAll(GROUPS_CLAIM.settings());
+        set.addAll(DN_CLAIM.settings());
+        set.addAll(NAME_CLAIM.settings());
+        set.addAll(MAIL_CLAIM.settings());
+        return set;
+    }
+
+    /**
+     * The OIDC realm offers a number of settings that rely on claim values that are populated by the OP in the ID Token or the User Info
+     * response.
+     * Each claim has 2 settings:
+     * <ul>
+     * <li>The name of the OpenID Connect claim to use</li>
+     * <li>An optional java pattern (regex) to apply to that claim value in order to extract the substring that should be used.</li>
+     * </ul>
+     * For example, the Elasticsearch User Principal could be configured to come from the OpenID Connect standard claim "email",
+     * and extract only the local-part of the user's email address (i.e. the name before the '@').
+     * This class encapsulates those 2 settings.
+     */
+    public static final class ClaimSetting {
+        public static final String CLAIMS_PREFIX = "claims.";
+        public static final String CLAIM_PATTERNS_PREFIX = "claim_patterns.";
+
+        private final Setting.AffixSetting<String> claim;
+        private final Setting.AffixSetting<String> pattern;
+
+        public ClaimSetting(String name) {
+            claim = RealmSettings.simpleString(TYPE, CLAIMS_PREFIX + name, Setting.Property.NodeScope);
+            pattern = RealmSettings.simpleString(TYPE, CLAIM_PATTERNS_PREFIX + name, Setting.Property.NodeScope);
+        }
+
+        public Collection<Setting.AffixSetting<?>> settings() {
+            return Arrays.asList(getClaim(), getPattern());
+        }
+
+        public String name(RealmConfig config) {
+            return getClaim().getConcreteSettingForNamespace(config.name()).getKey();
+        }
+
+        public Setting.AffixSetting<String> getClaim() {
+            return claim;
+        }
+
+        public Setting.AffixSetting<String> getPattern() {
+            return pattern;
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java
index 2190d6e63ba..c929fb3bfd3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java
@@ -37,6 +37,7 @@ public final class ClusterPrivilege extends Privilege {
     private static final Automaton MANAGE_SECURITY_AUTOMATON = patterns("cluster:admin/xpack/security/*");
     private static final Automaton MANAGE_SAML_AUTOMATON = patterns("cluster:admin/xpack/security/saml/*",
             InvalidateTokenAction.NAME, RefreshTokenAction.NAME);
+    private static final Automaton MANAGE_OIDC_AUTOMATON = patterns("cluster:admin/xpack/security/oidc/*");
     private static final Automaton MANAGE_TOKEN_AUTOMATON = patterns("cluster:admin/xpack/security/token/*");
     private static final Automaton MONITOR_AUTOMATON = patterns("cluster:monitor/*");
     private static final Automaton MONITOR_ML_AUTOMATON = patterns("cluster:monitor/xpack/ml/*");
@@ -82,6 +83,7 @@ public final class ClusterPrivilege extends Privilege {
     public static final ClusterPrivilege TRANSPORT_CLIENT =      new ClusterPrivilege("transport_client",    TRANSPORT_CLIENT_AUTOMATON);
     public static final ClusterPrivilege MANAGE_SECURITY =       new ClusterPrivilege("manage_security",     MANAGE_SECURITY_AUTOMATON);
     public static final ClusterPrivilege MANAGE_SAML =           new ClusterPrivilege("manage_saml",         MANAGE_SAML_AUTOMATON);
+    public static final ClusterPrivilege MANAGE_OIDC =           new ClusterPrivilege("manage_oidc", MANAGE_OIDC_AUTOMATON);
     public static final ClusterPrivilege MANAGE_PIPELINE =       new ClusterPrivilege("manage_pipeline", "cluster:admin/ingest/pipeline/*");
     public static final ClusterPrivilege MANAGE_CCR =            new ClusterPrivilege("manage_ccr", MANAGE_CCR_AUTOMATON);
     public static final ClusterPrivilege READ_CCR =              new ClusterPrivilege("read_ccr", READ_CCR_AUTOMATON);
@@ -109,6 +111,7 @@ public final class ClusterPrivilege extends Privilege {
             .put("transport_client", TRANSPORT_CLIENT)
             .put("manage_security", MANAGE_SECURITY)
             .put("manage_saml", MANAGE_SAML)
+            .put("manage_oidc", MANAGE_OIDC)
             .put("manage_pipeline", MANAGE_PIPELINE)
             .put("manage_rollup", MANAGE_ROLLUP)
             .put("manage_ccr", MANAGE_CCR)
diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle
index 067bb218e7e..6f99fe87bf4 100644
--- a/x-pack/plugin/security/build.gradle
+++ b/x-pack/plugin/security/build.gradle
@@ -56,6 +56,16 @@ dependencies {
     compile "org.apache.httpcomponents:httpclient-cache:${versions.httpclient}"
     compile 'com.google.guava:guava:19.0'
 
+    // Dependencies for oidc
+    compile "com.nimbusds:oauth2-oidc-sdk:6.5"
+    compile "com.nimbusds:nimbus-jose-jwt:4.41.2"
+    compile "com.nimbusds:lang-tag:1.4.4"
+    compile "com.sun.mail:jakarta.mail:1.6.3"
+    compile "net.jcip:jcip-annotations:1.0"
+    compile "net.minidev:json-smart:2.3"
+    compile "net.minidev:accessors-smart:1.2"
+    compile "org.ow2.asm:asm:7.1"
+
     testCompile 'org.elasticsearch:securemock:1.2'
     testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
     //testCompile "org.yaml:snakeyaml:${versions.snakeyaml}"
@@ -162,7 +172,7 @@ forbiddenPatterns {
 }
 
 forbiddenApisMain {
-    signaturesFiles += files('forbidden/ldap-signatures.txt', 'forbidden/xml-signatures.txt')
+    signaturesFiles += files('forbidden/ldap-signatures.txt', 'forbidden/xml-signatures.txt', 'forbidden/oidc-signatures.txt')
 }
 
 // classes are missing, e.g. com.ibm.icu.lang.UCharacter
@@ -259,7 +269,9 @@ thirdPartyAudit {
         'net.sf.ehcache.Ehcache',
         'net.sf.ehcache.Element',
         // [missing classes] SLF4j includes an optional class that depends on an extension class (!)
-        'org.slf4j.ext.EventData'
+        'org.slf4j.ext.EventData',
+        // Optional dependency of oauth2-oidc-sdk that we don't need since we do not support AES-SIV for JWE
+        'org.cryptomator.siv.SivMode'
     )
 
     ignoreViolations (
@@ -280,7 +292,13 @@ if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) {
         'javax.xml.bind.JAXBElement',
         'javax.xml.bind.JAXBException',
         'javax.xml.bind.Unmarshaller',
-        'javax.xml.bind.UnmarshallerHandler'
+        'javax.xml.bind.UnmarshallerHandler',
+        'javax.activation.ActivationDataFlavor',
+        'javax.activation.DataContentHandler',
+        'javax.activation.DataHandler',
+        'javax.activation.DataSource',
+        'javax.activation.FileDataSource',
+        'javax.activation.FileTypeMap'
     )
 }
 
diff --git a/x-pack/plugin/security/forbidden/oidc-signatures.txt b/x-pack/plugin/security/forbidden/oidc-signatures.txt
new file mode 100644
index 00000000000..05a2babdbe7
--- /dev/null
+++ b/x-pack/plugin/security/forbidden/oidc-signatures.txt
@@ -0,0 +1,3 @@
+@defaultMessage Blocking methods should not be used for HTTP requests. Use CloseableHttpAsyncClient instead
+com.nimbusds.oauth2.sdk.http.HTTPRequest#send(javax.net.ssl.HostnameVerifier, javax.net.ssl.SSLSocketFactory)
+com.nimbusds.oauth2.sdk.http.HTTPRequest#send()
\ No newline at end of file
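
The signatures file above forbids the SDK's synchronous `HTTPRequest#send()` variants so that OIDC HTTP traffic has to go through a non-blocking client instead of tying up the calling thread. A minimal sketch of the non-blocking alternative with Apache's `CloseableHttpAsyncClient` (the discovery URL is illustrative, and a real caller would stay asynchronous rather than waiting on the returned future):

    // Illustrative sketch only: issue an HTTP request without blocking the
    // calling thread, using the async client the forbidden-APIs message points to.
    import java.util.concurrent.Future;
    import org.apache.http.HttpResponse;
    import org.apache.http.client.methods.HttpGet;
    import org.apache.http.concurrent.FutureCallback;
    import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
    import org.apache.http.impl.nio.client.HttpAsyncClients;

    public class AsyncHttpSketch {
        public static void main(String[] args) throws Exception {
            try (CloseableHttpAsyncClient client = HttpAsyncClients.createDefault()) {
                client.start();
                // Hypothetical OpenID Provider metadata endpoint.
                HttpGet request = new HttpGet("https://op.example.com/.well-known/openid-configuration");
                Future<HttpResponse> future = client.execute(request, new FutureCallback<HttpResponse>() {
                    @Override
                    public void completed(HttpResponse response) {
                        System.out.println("Completed: " + response.getStatusLine());
                    }

                    @Override
                    public void failed(Exception ex) {
                        System.out.println("Failed: " + ex);
                    }

                    @Override
                    public void cancelled() {
                        System.out.println("Cancelled");
                    }
                });
                future.get(); // only so this demo does not exit before the callback fires
            }
        }
    }
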
diff --git a/x-pack/plugin/security/licenses/accessors-smart-1.2.jar.sha1 b/x-pack/plugin/security/licenses/accessors-smart-1.2.jar.sha1
new file mode 100644
index 00000000000..e8e174c88c7
--- /dev/null
+++ b/x-pack/plugin/security/licenses/accessors-smart-1.2.jar.sha1
@@ -0,0 +1 @@
+c592b500269bfde36096641b01238a8350f8aa31
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/accessors-smart-LICENSE.txt b/x-pack/plugin/security/licenses/accessors-smart-LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/x-pack/plugin/security/licenses/accessors-smart-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/x-pack/plugin/security/licenses/accessors-smart-NOTICE.txt b/x-pack/plugin/security/licenses/accessors-smart-NOTICE.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/x-pack/plugin/security/licenses/asm-7.1.jar.sha1 b/x-pack/plugin/security/licenses/asm-7.1.jar.sha1
new file mode 100644
index 00000000000..3a53b2ef7f9
--- /dev/null
+++ b/x-pack/plugin/security/licenses/asm-7.1.jar.sha1
@@ -0,0 +1 @@
+fa29aa438674ff19d5e1386d2c3527a0267f291e
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/asm-LICENSE.txt b/x-pack/plugin/security/licenses/asm-LICENSE.txt
new file mode 100644
index 00000000000..afb064f2f26
--- /dev/null
+++ b/x-pack/plugin/security/licenses/asm-LICENSE.txt
@@ -0,0 +1,26 @@
+Copyright (c) 2012 France Télécom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. Neither the name of the copyright holders nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/x-pack/plugin/security/licenses/asm-NOTICE.txt b/x-pack/plugin/security/licenses/asm-NOTICE.txt
new file mode 100644
index 00000000000..8d1c8b69c3f
--- /dev/null
+++ b/x-pack/plugin/security/licenses/asm-NOTICE.txt
@@ -0,0 +1 @@
+ 
diff --git a/x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1 b/x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1
new file mode 100644
index 00000000000..12d5021ee37
--- /dev/null
+++ b/x-pack/plugin/security/licenses/jakarta.mail-1.6.3.jar.sha1
@@ -0,0 +1 @@
+787e007e377223bba85a33599d3da416c135f99b
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/jakarta.mail-LICENSE.txt b/x-pack/plugin/security/licenses/jakarta.mail-LICENSE.txt
new file mode 100644
index 00000000000..5de3d1b40c1
--- /dev/null
+++ b/x-pack/plugin/security/licenses/jakarta.mail-LICENSE.txt
@@ -0,0 +1,637 @@
+# Eclipse Public License - v 2.0
+
+        THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
+        PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
+        OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+    1. DEFINITIONS
+
+    "Contribution" means:
+
+      a) in the case of the initial Contributor, the initial content
+         Distributed under this Agreement, and
+
+      b) in the case of each subsequent Contributor: 
+         i) changes to the Program, and 
+         ii) additions to the Program;
+      where such changes and/or additions to the Program originate from
+      and are Distributed by that particular Contributor. A Contribution
+      "originates" from a Contributor if it was added to the Program by
+      such Contributor itself or anyone acting on such Contributor's behalf.
+      Contributions do not include changes or additions to the Program that
+      are not Modified Works.
+
+    "Contributor" means any person or entity that Distributes the Program.
+
+    "Licensed Patents" mean patent claims licensable by a Contributor which
+    are necessarily infringed by the use or sale of its Contribution alone
+    or when combined with the Program.
+
+    "Program" means the Contributions Distributed in accordance with this
+    Agreement.
+
+    "Recipient" means anyone who receives the Program under this Agreement
+    or any Secondary License (as applicable), including Contributors.
+
+    "Derivative Works" shall mean any work, whether in Source Code or other
+    form, that is based on (or derived from) the Program and for which the
+    editorial revisions, annotations, elaborations, or other modifications
+    represent, as a whole, an original work of authorship.
+
+    "Modified Works" shall mean any work in Source Code or other form that
+    results from an addition to, deletion from, or modification of the
+    contents of the Program, including, for purposes of clarity any new file
+    in Source Code form that contains any contents of the Program. Modified
+    Works shall not include works that contain only declarations,
+    interfaces, types, classes, structures, or files of the Program solely
+    in each case in order to link to, bind by name, or subclass the Program
+    or Modified Works thereof.
+
+    "Distribute" means the acts of a) distributing or b) making available
+    in any manner that enables the transfer of a copy.
+
+    "Source Code" means the form of a Program preferred for making
+    modifications, including but not limited to software source code,
+    documentation source, and configuration files.
+
+    "Secondary License" means either the GNU General Public License,
+    Version 2.0, or any later versions of that license, including any
+    exceptions or additional permissions as identified by the initial
+    Contributor.
+
+    2. GRANT OF RIGHTS
+
+      a) Subject to the terms of this Agreement, each Contributor hereby
+      grants Recipient a non-exclusive, worldwide, royalty-free copyright
+      license to reproduce, prepare Derivative Works of, publicly display,
+      publicly perform, Distribute and sublicense the Contribution of such
+      Contributor, if any, and such Derivative Works.
+
+      b) Subject to the terms of this Agreement, each Contributor hereby
+      grants Recipient a non-exclusive, worldwide, royalty-free patent
+      license under Licensed Patents to make, use, sell, offer to sell,
+      import and otherwise transfer the Contribution of such Contributor,
+      if any, in Source Code or other form. This patent license shall
+      apply to the combination of the Contribution and the Program if, at
+      the time the Contribution is added by the Contributor, such addition
+      of the Contribution causes such combination to be covered by the
+      Licensed Patents. The patent license shall not apply to any other
+      combinations which include the Contribution. No hardware per se is
+      licensed hereunder.
+
+      c) Recipient understands that although each Contributor grants the
+      licenses to its Contributions set forth herein, no assurances are
+      provided by any Contributor that the Program does not infringe the
+      patent or other intellectual property rights of any other entity.
+      Each Contributor disclaims any liability to Recipient for claims
+      brought by any other entity based on infringement of intellectual
+      property rights or otherwise. As a condition to exercising the
+      rights and licenses granted hereunder, each Recipient hereby
+      assumes sole responsibility to secure any other intellectual
+      property rights needed, if any. For example, if a third party
+      patent license is required to allow Recipient to Distribute the
+      Program, it is Recipient's responsibility to acquire that license
+      before distributing the Program.
+
+      d) Each Contributor represents that to its knowledge it has
+      sufficient copyright rights in its Contribution, if any, to grant
+      the copyright license set forth in this Agreement.
+
+      e) Notwithstanding the terms of any Secondary License, no
+      Contributor makes additional grants to any Recipient (other than
+      those set forth in this Agreement) as a result of such Recipient's
+      receipt of the Program under the terms of a Secondary License
+      (if permitted under the terms of Section 3).
+
+    3. REQUIREMENTS
+
+    3.1 If a Contributor Distributes the Program in any form, then:
+
+      a) the Program must also be made available as Source Code, in
+      accordance with section 3.2, and the Contributor must accompany
+      the Program with a statement that the Source Code for the Program
+      is available under this Agreement, and informs Recipients how to
+      obtain it in a reasonable manner on or through a medium customarily
+      used for software exchange; and
+
+      b) the Contributor may Distribute the Program under a license
+      different than this Agreement, provided that such license:
+         i) effectively disclaims on behalf of all other Contributors all
+         warranties and conditions, express and implied, including
+         warranties or conditions of title and non-infringement, and
+         implied warranties or conditions of merchantability and fitness
+         for a particular purpose;
+
+         ii) effectively excludes on behalf of all other Contributors all
+         liability for damages, including direct, indirect, special,
+         incidental and consequential damages, such as lost profits;
+
+         iii) does not attempt to limit or alter the recipients' rights
+         in the Source Code under section 3.2; and
+
+         iv) requires any subsequent distribution of the Program by any
+         party to be under a license that satisfies the requirements
+         of this section 3.
+
+    3.2 When the Program is Distributed as Source Code:
+
+      a) it must be made available under this Agreement, or if the
+      Program (i) is combined with other material in a separate file or
+      files made available under a Secondary License, and (ii) the initial
+      Contributor attached to the Source Code the notice described in
+      Exhibit A of this Agreement, then the Program may be made available
+      under the terms of such Secondary Licenses, and
+
+      b) a copy of this Agreement must be included with each copy of
+      the Program.
+
+    3.3 Contributors may not remove or alter any copyright, patent,
+    trademark, attribution notices, disclaimers of warranty, or limitations
+    of liability ("notices") contained within the Program from any copy of
+    the Program which they Distribute, provided that Contributors may add
+    their own appropriate notices.
+
+    4. COMMERCIAL DISTRIBUTION
+
+    Commercial distributors of software may accept certain responsibilities
+    with respect to end users, business partners and the like. While this
+    license is intended to facilitate the commercial use of the Program,
+    the Contributor who includes the Program in a commercial product
+    offering should do so in a manner which does not create potential
+    liability for other Contributors. Therefore, if a Contributor includes
+    the Program in a commercial product offering, such Contributor
+    ("Commercial Contributor") hereby agrees to defend and indemnify every
+    other Contributor ("Indemnified Contributor") against any losses,
+    damages and costs (collectively "Losses") arising from claims, lawsuits
+    and other legal actions brought by a third party against the Indemnified
+    Contributor to the extent caused by the acts or omissions of such
+    Commercial Contributor in connection with its distribution of the Program
+    in a commercial product offering. The obligations in this section do not
+    apply to any claims or Losses relating to any actual or alleged
+    intellectual property infringement. In order to qualify, an Indemnified
+    Contributor must: a) promptly notify the Commercial Contributor in
+    writing of such claim, and b) allow the Commercial Contributor to control,
+    and cooperate with the Commercial Contributor in, the defense and any
+    related settlement negotiations. The Indemnified Contributor may
+    participate in any such claim at its own expense.
+
+    For example, a Contributor might include the Program in a commercial
+    product offering, Product X. That Contributor is then a Commercial
+    Contributor. If that Commercial Contributor then makes performance
+    claims, or offers warranties related to Product X, those performance
+    claims and warranties are such Commercial Contributor's responsibility
+    alone. Under this section, the Commercial Contributor would have to
+    defend claims against the other Contributors related to those performance
+    claims and warranties, and if a court requires any other Contributor to
+    pay any damages as a result, the Commercial Contributor must pay
+    those damages.
+
+    5. NO WARRANTY
+
+    EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
+    PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS"
+    BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
+    IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF
+    TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
+    PURPOSE. Each Recipient is solely responsible for determining the
+    appropriateness of using and distributing the Program and assumes all
+    risks associated with its exercise of rights under this Agreement,
+    including but not limited to the risks and costs of program errors,
+    compliance with applicable laws, damage to or loss of data, programs
+    or equipment, and unavailability or interruption of operations.
+
+    6. DISCLAIMER OF LIABILITY
+
+    EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
+    PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS
+    SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
+    PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+    ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
+    EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE
+    POSSIBILITY OF SUCH DAMAGES.
+
+    7. GENERAL
+
+    If any provision of this Agreement is invalid or unenforceable under
+    applicable law, it shall not affect the validity or enforceability of
+    the remainder of the terms of this Agreement, and without further
+    action by the parties hereto, such provision shall be reformed to the
+    minimum extent necessary to make such provision valid and enforceable.
+
+    If Recipient institutes patent litigation against any entity
+    (including a cross-claim or counterclaim in a lawsuit) alleging that the
+    Program itself (excluding combinations of the Program with other software
+    or hardware) infringes such Recipient's patent(s), then such Recipient's
+    rights granted under Section 2(b) shall terminate as of the date such
+    litigation is filed.
+
+    All Recipient's rights under this Agreement shall terminate if it
+    fails to comply with any of the material terms or conditions of this
+    Agreement and does not cure such failure in a reasonable period of
+    time after becoming aware of such noncompliance. If all Recipient's
+    rights under this Agreement terminate, Recipient agrees to cease use
+    and distribution of the Program as soon as reasonably practicable.
+    However, Recipient's obligations under this Agreement and any licenses
+    granted by Recipient relating to the Program shall continue and survive.
+
+    Everyone is permitted to copy and distribute copies of this Agreement,
+    but in order to avoid inconsistency the Agreement is copyrighted and
+    may only be modified in the following manner. The Agreement Steward
+    reserves the right to publish new versions (including revisions) of
+    this Agreement from time to time. No one other than the Agreement
+    Steward has the right to modify this Agreement. The Eclipse Foundation
+    is the initial Agreement Steward. The Eclipse Foundation may assign the
+    responsibility to serve as the Agreement Steward to a suitable separate
+    entity. Each new version of the Agreement will be given a distinguishing
+    version number. The Program (including Contributions) may always be
+    Distributed subject to the version of the Agreement under which it was
+    received. In addition, after a new version of the Agreement is published,
+    Contributor may elect to Distribute the Program (including its
+    Contributions) under the new version.
+
+    Except as expressly stated in Sections 2(a) and 2(b) above, Recipient
+    receives no rights or licenses to the intellectual property of any
+    Contributor under this Agreement, whether expressly, by implication,
+    estoppel or otherwise. All rights in the Program not expressly granted
+    under this Agreement are reserved. Nothing in this Agreement is intended
+    to be enforceable by any entity that is not a Contributor or Recipient.
+    No third-party beneficiary rights are created under this Agreement.
+
+    Exhibit A - Form of Secondary Licenses Notice
+
+    "This Source Code may also be made available under the following 
+    Secondary Licenses when the conditions for such availability set forth 
+    in the Eclipse Public License, v. 2.0 are satisfied: {name license(s),
+    version(s), and exceptions or additional permissions here}."
+
+      Simply including a copy of this Agreement, including this Exhibit A
+      is not sufficient to license the Source Code under Secondary Licenses.
+
+      If it is not possible or desirable to put the notice in a particular
+      file, then You may include the notice in a location (such as a LICENSE
+      file in a relevant directory) where a recipient would be likely to
+      look for such a notice.
+
+      You may add additional accurate notices of copyright ownership.
+
+---
+
+##    The GNU General Public License (GPL) Version 2, June 1991
+
+    Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+    51 Franklin Street, Fifth Floor
+    Boston, MA 02110-1335
+    USA
+
+    Everyone is permitted to copy and distribute verbatim copies
+    of this license document, but changing it is not allowed.
+
+    Preamble
+
+    The licenses for most software are designed to take away your freedom to
+    share and change it. By contrast, the GNU General Public License is
+    intended to guarantee your freedom to share and change free software--to
+    make sure the software is free for all its users. This General Public
+    License applies to most of the Free Software Foundation's software and
+    to any other program whose authors commit to using it. (Some other Free
+    Software Foundation software is covered by the GNU Library General
+    Public License instead.) You can apply it to your programs, too.
+
+    When we speak of free software, we are referring to freedom, not price.
+    Our General Public Licenses are designed to make sure that you have the
+    freedom to distribute copies of free software (and charge for this
+    service if you wish), that you receive source code or can get it if you
+    want it, that you can change the software or use pieces of it in new
+    free programs; and that you know you can do these things.
+
+    To protect your rights, we need to make restrictions that forbid anyone
+    to deny you these rights or to ask you to surrender the rights. These
+    restrictions translate to certain responsibilities for you if you
+    distribute copies of the software, or if you modify it.
+
+    For example, if you distribute copies of such a program, whether gratis
+    or for a fee, you must give the recipients all the rights that you have.
+    You must make sure that they, too, receive or can get the source code.
+    And you must show them these terms so they know their rights.
+
+    We protect your rights with two steps: (1) copyright the software, and
+    (2) offer you this license which gives you legal permission to copy,
+    distribute and/or modify the software.
+
+    Also, for each author's protection and ours, we want to make certain
+    that everyone understands that there is no warranty for this free
+    software. If the software is modified by someone else and passed on, we
+    want its recipients to know that what they have is not the original, so
+    that any problems introduced by others will not reflect on the original
+    authors' reputations.
+
+    Finally, any free program is threatened constantly by software patents.
+    We wish to avoid the danger that redistributors of a free program will
+    individually obtain patent licenses, in effect making the program
+    proprietary. To prevent this, we have made it clear that any patent must
+    be licensed for everyone's free use or not licensed at all.
+
+    The precise terms and conditions for copying, distribution and
+    modification follow.
+
+    TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+    0. This License applies to any program or other work which contains a
+    notice placed by the copyright holder saying it may be distributed under
+    the terms of this General Public License. The "Program", below, refers
+    to any such program or work, and a "work based on the Program" means
+    either the Program or any derivative work under copyright law: that is
+    to say, a work containing the Program or a portion of it, either
+    verbatim or with modifications and/or translated into another language.
+    (Hereinafter, translation is included without limitation in the term
+    "modification".) Each licensee is addressed as "you".
+
+    Activities other than copying, distribution and modification are not
+    covered by this License; they are outside its scope. The act of running
+    the Program is not restricted, and the output from the Program is
+    covered only if its contents constitute a work based on the Program
+    (independent of having been made by running the Program). Whether that
+    is true depends on what the Program does.
+
+    1. You may copy and distribute verbatim copies of the Program's source
+    code as you receive it, in any medium, provided that you conspicuously
+    and appropriately publish on each copy an appropriate copyright notice
+    and disclaimer of warranty; keep intact all the notices that refer to
+    this License and to the absence of any warranty; and give any other
+    recipients of the Program a copy of this License along with the Program.
+
+    You may charge a fee for the physical act of transferring a copy, and
+    you may at your option offer warranty protection in exchange for a fee.
+
+    2. You may modify your copy or copies of the Program or any portion of
+    it, thus forming a work based on the Program, and copy and distribute
+    such modifications or work under the terms of Section 1 above, provided
+    that you also meet all of these conditions:
+
+        a) You must cause the modified files to carry prominent notices
+        stating that you changed the files and the date of any change.
+
+        b) You must cause any work that you distribute or publish, that in
+        whole or in part contains or is derived from the Program or any part
+        thereof, to be licensed as a whole at no charge to all third parties
+        under the terms of this License.
+
+        c) If the modified program normally reads commands interactively
+        when run, you must cause it, when started running for such
+        interactive use in the most ordinary way, to print or display an
+        announcement including an appropriate copyright notice and a notice
+        that there is no warranty (or else, saying that you provide a
+        warranty) and that users may redistribute the program under these
+        conditions, and telling the user how to view a copy of this License.
+        (Exception: if the Program itself is interactive but does not
+        normally print such an announcement, your work based on the Program
+        is not required to print an announcement.)
+
+    These requirements apply to the modified work as a whole. If
+    identifiable sections of that work are not derived from the Program, and
+    can be reasonably considered independent and separate works in
+    themselves, then this License, and its terms, do not apply to those
+    sections when you distribute them as separate works. But when you
+    distribute the same sections as part of a whole which is a work based on
+    the Program, the distribution of the whole must be on the terms of this
+    License, whose permissions for other licensees extend to the entire
+    whole, and thus to each and every part regardless of who wrote it.
+
+    Thus, it is not the intent of this section to claim rights or contest
+    your rights to work written entirely by you; rather, the intent is to
+    exercise the right to control the distribution of derivative or
+    collective works based on the Program.
+
+    In addition, mere aggregation of another work not based on the Program
+    with the Program (or with a work based on the Program) on a volume of a
+    storage or distribution medium does not bring the other work under the
+    scope of this License.
+
+    3. You may copy and distribute the Program (or a work based on it,
+    under Section 2) in object code or executable form under the terms of
+    Sections 1 and 2 above provided that you also do one of the following:
+
+        a) Accompany it with the complete corresponding machine-readable
+        source code, which must be distributed under the terms of Sections 1
+        and 2 above on a medium customarily used for software interchange; or,
+
+        b) Accompany it with a written offer, valid for at least three
+        years, to give any third party, for a charge no more than your cost
+        of physically performing source distribution, a complete
+        machine-readable copy of the corresponding source code, to be
+        distributed under the terms of Sections 1 and 2 above on a medium
+        customarily used for software interchange; or,
+
+        c) Accompany it with the information you received as to the offer to
+        distribute corresponding source code. (This alternative is allowed
+        only for noncommercial distribution and only if you received the
+        program in object code or executable form with such an offer, in
+        accord with Subsection b above.)
+
+    The source code for a work means the preferred form of the work for
+    making modifications to it. For an executable work, complete source code
+    means all the source code for all modules it contains, plus any
+    associated interface definition files, plus the scripts used to control
+    compilation and installation of the executable. However, as a special
+    exception, the source code distributed need not include anything that is
+    normally distributed (in either source or binary form) with the major
+    components (compiler, kernel, and so on) of the operating system on
+    which the executable runs, unless that component itself accompanies the
+    executable.
+
+    If distribution of executable or object code is made by offering access
+    to copy from a designated place, then offering equivalent access to copy
+    the source code from the same place counts as distribution of the source
+    code, even though third parties are not compelled to copy the source
+    along with the object code.
+
+    4. You may not copy, modify, sublicense, or distribute the Program
+    except as expressly provided under this License. Any attempt otherwise
+    to copy, modify, sublicense or distribute the Program is void, and will
+    automatically terminate your rights under this License. However, parties
+    who have received copies, or rights, from you under this License will
+    not have their licenses terminated so long as such parties remain in
+    full compliance.
+
+    5. You are not required to accept this License, since you have not
+    signed it. However, nothing else grants you permission to modify or
+    distribute the Program or its derivative works. These actions are
+    prohibited by law if you do not accept this License. Therefore, by
+    modifying or distributing the Program (or any work based on the
+    Program), you indicate your acceptance of this License to do so, and all
+    its terms and conditions for copying, distributing or modifying the
+    Program or works based on it.
+
+    6. Each time you redistribute the Program (or any work based on the
+    Program), the recipient automatically receives a license from the
+    original licensor to copy, distribute or modify the Program subject to
+    these terms and conditions. You may not impose any further restrictions
+    on the recipients' exercise of the rights granted herein. You are not
+    responsible for enforcing compliance by third parties to this License.
+
+    7. If, as a consequence of a court judgment or allegation of patent
+    infringement or for any other reason (not limited to patent issues),
+    conditions are imposed on you (whether by court order, agreement or
+    otherwise) that contradict the conditions of this License, they do not
+    excuse you from the conditions of this License. If you cannot distribute
+    so as to satisfy simultaneously your obligations under this License and
+    any other pertinent obligations, then as a consequence you may not
+    distribute the Program at all. For example, if a patent license would
+    not permit royalty-free redistribution of the Program by all those who
+    receive copies directly or indirectly through you, then the only way you
+    could satisfy both it and this License would be to refrain entirely from
+    distribution of the Program.
+
+    If any portion of this section is held invalid or unenforceable under
+    any particular circumstance, the balance of the section is intended to
+    apply and the section as a whole is intended to apply in other
+    circumstances.
+
+    It is not the purpose of this section to induce you to infringe any
+    patents or other property right claims or to contest validity of any
+    such claims; this section has the sole purpose of protecting the
+    integrity of the free software distribution system, which is implemented
+    by public license practices. Many people have made generous
+    contributions to the wide range of software distributed through that
+    system in reliance on consistent application of that system; it is up to
+    the author/donor to decide if he or she is willing to distribute
+    software through any other system and a licensee cannot impose that choice.
+
+    This section is intended to make thoroughly clear what is believed to be
+    a consequence of the rest of this License.
+
+    8. If the distribution and/or use of the Program is restricted in
+    certain countries either by patents or by copyrighted interfaces, the
+    original copyright holder who places the Program under this License may
+    add an explicit geographical distribution limitation excluding those
+    countries, so that distribution is permitted only in or among countries
+    not thus excluded. In such case, this License incorporates the
+    limitation as if written in the body of this License.
+
+    9. The Free Software Foundation may publish revised and/or new
+    versions of the General Public License from time to time. Such new
+    versions will be similar in spirit to the present version, but may
+    differ in detail to address new problems or concerns.
+
+    Each version is given a distinguishing version number. If the Program
+    specifies a version number of this License which applies to it and "any
+    later version", you have the option of following the terms and
+    conditions either of that version or of any later version published by
+    the Free Software Foundation. If the Program does not specify a version
+    number of this License, you may choose any version ever published by the
+    Free Software Foundation.
+
+    10. If you wish to incorporate parts of the Program into other free
+    programs whose distribution conditions are different, write to the
+    author to ask for permission. For software which is copyrighted by the
+    Free Software Foundation, write to the Free Software Foundation; we
+    sometimes make exceptions for this. Our decision will be guided by the
+    two goals of preserving the free status of all derivatives of our free
+    software and of promoting the sharing and reuse of software generally.
+
+    NO WARRANTY
+
+    11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
+    WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+    EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+    OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
+    EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+    ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+    YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+    NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+    12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+    WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+    AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+    DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+    DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+    (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+    INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+    THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+    OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+    END OF TERMS AND CONDITIONS
+
+    How to Apply These Terms to Your New Programs
+
+    If you develop a new program, and you want it to be of the greatest
+    possible use to the public, the best way to achieve this is to make it
+    free software which everyone can redistribute and change under these terms.
+
+    To do so, attach the following notices to the program. It is safest to
+    attach them to the start of each source file to most effectively convey
+    the exclusion of warranty; and each file should have at least the
+    "copyright" line and a pointer to where the full notice is found.
+
+        One line to give the program's name and a brief idea of what it does.
+        Copyright (C) <year> <name of author>
+
+        This program is free software; you can redistribute it and/or modify
+        it under the terms of the GNU General Public License as published by
+        the Free Software Foundation; either version 2 of the License, or
+        (at your option) any later version.
+
+        This program is distributed in the hope that it will be useful, but
+        WITHOUT ANY WARRANTY; without even the implied warranty of
+        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+        General Public License for more details.
+
+        You should have received a copy of the GNU General Public License
+        along with this program; if not, write to the Free Software
+        Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+
+    Also add information on how to contact you by electronic and paper mail.
+
+    If the program is interactive, make it output a short notice like this
+    when it starts in an interactive mode:
+
+        Gnomovision version 69, Copyright (C) year name of author
+        Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type
+        `show w'. This is free software, and you are welcome to redistribute
+        it under certain conditions; type `show c' for details.
+
+    The hypothetical commands `show w' and `show c' should show the
+    appropriate parts of the General Public License. Of course, the commands
+    you use may be called something other than `show w' and `show c'; they
+    could even be mouse-clicks or menu items--whatever suits your program.
+
+    You should also get your employer (if you work as a programmer) or your
+    school, if any, to sign a "copyright disclaimer" for the program, if
+    necessary. Here is a sample; alter the names:
+
+        Yoyodyne, Inc., hereby disclaims all copyright interest in the
+        program `Gnomovision' (which makes passes at compilers) written by
+        James Hacker.
+
+        signature of Ty Coon, 1 April 1989
+        Ty Coon, President of Vice
+
+    This General Public License does not permit incorporating your program
+    into proprietary programs. If your program is a subroutine library, you
+    may consider it more useful to permit linking proprietary applications
+    with the library. If this is what you want to do, use the GNU Library
+    General Public License instead of this License.
+
+---
+
+## CLASSPATH EXCEPTION
+
+    Linking this library statically or dynamically with other modules is
+    making a combined work based on this library.  Thus, the terms and
+    conditions of the GNU General Public License version 2 cover the whole
+    combination.
+
+    As a special exception, the copyright holders of this library give you
+    permission to link this library with independent modules to produce an
+    executable, regardless of the license terms of these independent
+    modules, and to copy and distribute the resulting executable under
+    terms of your choice, provided that you also meet, for each linked
+    independent module, the terms and conditions of the license of that
+    module.  An independent module is a module which is not derived from or
+    based on this library.  If you modify this library, you may extend this
+    exception to your version of the library, but you are not obligated to
+    do so.  If you do not wish to do so, delete this exception statement
+    from your version.
diff --git a/x-pack/plugin/security/licenses/jakarta.mail-NOTICE.txt b/x-pack/plugin/security/licenses/jakarta.mail-NOTICE.txt
new file mode 100644
index 00000000000..9a5159e29c9
--- /dev/null
+++ b/x-pack/plugin/security/licenses/jakarta.mail-NOTICE.txt
@@ -0,0 +1,50 @@
+# Notices for Eclipse Project for JavaMail
+
+This content is produced and maintained by the Eclipse Project for JavaMail
+project.
+
+* Project home: https://projects.eclipse.org/projects/ee4j.javamail
+
+## Trademarks
+
+Eclipse Project for JavaMail is a trademark of the Eclipse Foundation.
+
+## Copyright
+
+All content is the property of the respective authors or their employers. For
+more information regarding authorship of content, please consult the listed
+source code repository logs.
+
+## Declared Project Licenses
+
+This program and the accompanying materials are made available under the terms
+of the Eclipse Public License v. 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made
+available under the following Secondary Licenses when the conditions for such
+availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU
+General Public License, version 2 with the GNU Classpath Exception which is
+available at https://www.gnu.org/software/classpath/license.html.
+
+SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
+
+## Source Code
+
+The project maintains the following source code repositories:
+
+* https://github.com/eclipse-ee4j/javamail
+
+## Third-party Content
+
+This project leverages the following third party content.
+
+None
+
+## Cryptography
+
+Content may contain encryption software. The country in which you are currently
+may have restrictions on the import, possession, and use, and/or re-export to
+another country, of encryption software. BEFORE using any encryption software,
+please check the country's laws, regulations and policies concerning the import,
+possession, or use, and re-export of encryption software, to see if this is
+permitted.
+
diff --git a/x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1 b/x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1
new file mode 100644
index 00000000000..9eaed527099
--- /dev/null
+++ b/x-pack/plugin/security/licenses/jcip-annotations-1.0.jar.sha1
@@ -0,0 +1 @@
+afba4942caaeaf46aab0b976afd57cc7c181467e
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/jcip-annotations-LICENSE.txt b/x-pack/plugin/security/licenses/jcip-annotations-LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/x-pack/plugin/security/licenses/jcip-annotations-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/x-pack/plugin/security/licenses/jcip-annotations-NOTICE.txt b/x-pack/plugin/security/licenses/jcip-annotations-NOTICE.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/x-pack/plugin/security/licenses/json-smart-2.3.jar.sha1 b/x-pack/plugin/security/licenses/json-smart-2.3.jar.sha1
new file mode 100644
index 00000000000..8c5c1588c15
--- /dev/null
+++ b/x-pack/plugin/security/licenses/json-smart-2.3.jar.sha1
@@ -0,0 +1 @@
+007396407491352ce4fa30de92efb158adb76b5b
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/json-smart-LICENSE.txt b/x-pack/plugin/security/licenses/json-smart-LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/x-pack/plugin/security/licenses/json-smart-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/x-pack/plugin/security/licenses/json-smart-NOTICE.txt b/x-pack/plugin/security/licenses/json-smart-NOTICE.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1 b/x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1
new file mode 100644
index 00000000000..9f21e84c8af
--- /dev/null
+++ b/x-pack/plugin/security/licenses/lang-tag-1.4.4.jar.sha1
@@ -0,0 +1 @@
+1db9a709239ae473a69b5424c7e78d0b7108229d
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/lang-tag-LICENSE.txt b/x-pack/plugin/security/licenses/lang-tag-LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/x-pack/plugin/security/licenses/lang-tag-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/x-pack/plugin/security/licenses/lang-tag-NOTICE.txt b/x-pack/plugin/security/licenses/lang-tag-NOTICE.txt
new file mode 100644
index 00000000000..37a85f6850d
--- /dev/null
+++ b/x-pack/plugin/security/licenses/lang-tag-NOTICE.txt
@@ -0,0 +1,14 @@
+Nimbus Language Tags
+
+Copyright 2012-2016, Connect2id Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-4.41.2.jar.sha1 b/x-pack/plugin/security/licenses/nimbus-jose-jwt-4.41.2.jar.sha1
new file mode 100644
index 00000000000..7713379f35a
--- /dev/null
+++ b/x-pack/plugin/security/licenses/nimbus-jose-jwt-4.41.2.jar.sha1
@@ -0,0 +1 @@
+3981d32ddfa2919a7af46eb5e484f8dc064da665
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt b/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt
new file mode 100644
index 00000000000..cb9ad94f662
--- /dev/null
+++ b/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt
@@ -0,0 +1,14 @@
+Nimbus JOSE + JWT
+
+Copyright 2012 - 2018, Connect2id Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
diff --git a/x-pack/plugin/security/licenses/oauth2-oidc-sdk-6.5.jar.sha1 b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-6.5.jar.sha1
new file mode 100644
index 00000000000..12e6376c4db
--- /dev/null
+++ b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-6.5.jar.sha1
@@ -0,0 +1 @@
+422759fc195f65345e8da3265c69dea3c6cf56a5
\ No newline at end of file
diff --git a/x-pack/plugin/security/licenses/oauth2-oidc-sdk-LICENSE.txt b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/x-pack/plugin/security/licenses/oauth2-oidc-sdk-NOTICE.txt b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-NOTICE.txt
new file mode 100644
index 00000000000..5e111b04cfc
--- /dev/null
+++ b/x-pack/plugin/security/licenses/oauth2-oidc-sdk-NOTICE.txt
@@ -0,0 +1,14 @@
+Nimbus OAuth 2.0 SDK with OpenID Connect extensions
+
+Copyright 2012-2018, Connect2id Ltd and contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
index 33de1c90f03..3010c3637b6 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
@@ -80,6 +80,9 @@ import org.elasticsearch.xpack.core.security.SecuritySettings;
 import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction;
 import org.elasticsearch.xpack.core.security.action.GetApiKeyAction;
 import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationAction;
 import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction;
 import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesAction;
 import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction;
@@ -135,6 +138,9 @@ import org.elasticsearch.xpack.security.action.TransportCreateApiKeyAction;
 import org.elasticsearch.xpack.security.action.TransportGetApiKeyAction;
 import org.elasticsearch.xpack.security.action.TransportInvalidateApiKeyAction;
 import org.elasticsearch.xpack.security.action.filter.SecurityActionFilter;
+import org.elasticsearch.xpack.security.action.oidc.TransportOpenIdConnectAuthenticateAction;
+import org.elasticsearch.xpack.security.action.oidc.TransportOpenIdConnectLogoutAction;
+import org.elasticsearch.xpack.security.action.oidc.TransportOpenIdConnectPrepareAuthenticationAction;
 import org.elasticsearch.xpack.security.action.privilege.TransportDeletePrivilegesAction;
 import org.elasticsearch.xpack.security.action.privilege.TransportGetPrivilegesAction;
 import org.elasticsearch.xpack.security.action.privilege.TransportPutPrivilegesAction;
@@ -194,6 +200,7 @@ import org.elasticsearch.xpack.security.rest.action.RestGetApiKeyAction;
 import org.elasticsearch.xpack.security.rest.action.RestInvalidateApiKeyAction;
 import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction;
 import org.elasticsearch.xpack.security.rest.action.oauth2.RestInvalidateTokenAction;
+import org.elasticsearch.xpack.security.rest.action.oidc.RestOpenIdConnectLogoutAction;
 import org.elasticsearch.xpack.security.rest.action.privilege.RestDeletePrivilegesAction;
 import org.elasticsearch.xpack.security.rest.action.privilege.RestGetPrivilegesAction;
 import org.elasticsearch.xpack.security.rest.action.privilege.RestPutPrivilegesAction;
@@ -205,6 +212,8 @@ import org.elasticsearch.xpack.security.rest.action.role.RestPutRoleAction;
 import org.elasticsearch.xpack.security.rest.action.rolemapping.RestDeleteRoleMappingAction;
 import org.elasticsearch.xpack.security.rest.action.rolemapping.RestGetRoleMappingsAction;
 import org.elasticsearch.xpack.security.rest.action.rolemapping.RestPutRoleMappingAction;
+import org.elasticsearch.xpack.security.rest.action.oidc.RestOpenIdConnectAuthenticateAction;
+import org.elasticsearch.xpack.security.rest.action.oidc.RestOpenIdConnectPrepareAuthenticationAction;
 import org.elasticsearch.xpack.security.rest.action.saml.RestSamlAuthenticateAction;
 import org.elasticsearch.xpack.security.rest.action.saml.RestSamlInvalidateSessionAction;
 import org.elasticsearch.xpack.security.rest.action.saml.RestSamlLogoutAction;
@@ -746,6 +755,10 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
                 new ActionHandler<>(SamlAuthenticateAction.INSTANCE, TransportSamlAuthenticateAction.class),
                 new ActionHandler<>(SamlLogoutAction.INSTANCE, TransportSamlLogoutAction.class),
                 new ActionHandler<>(SamlInvalidateSessionAction.INSTANCE, TransportSamlInvalidateSessionAction.class),
+                new ActionHandler<>(OpenIdConnectPrepareAuthenticationAction.INSTANCE,
+                    TransportOpenIdConnectPrepareAuthenticationAction.class),
+                new ActionHandler<>(OpenIdConnectAuthenticateAction.INSTANCE, TransportOpenIdConnectAuthenticateAction.class),
+                new ActionHandler<>(OpenIdConnectLogoutAction.INSTANCE, TransportOpenIdConnectLogoutAction.class),
                 new ActionHandler<>(GetPrivilegesAction.INSTANCE, TransportGetPrivilegesAction.class),
                 new ActionHandler<>(PutPrivilegesAction.INSTANCE, TransportPutPrivilegesAction.class),
                 new ActionHandler<>(DeletePrivilegesAction.INSTANCE, TransportDeletePrivilegesAction.class),
@@ -798,6 +811,9 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
                 new RestSamlAuthenticateAction(settings, restController, getLicenseState()),
                 new RestSamlLogoutAction(settings, restController, getLicenseState()),
                 new RestSamlInvalidateSessionAction(settings, restController, getLicenseState()),
+                new RestOpenIdConnectPrepareAuthenticationAction(settings, restController, getLicenseState()),
+                new RestOpenIdConnectAuthenticateAction(settings, restController, getLicenseState()),
+                new RestOpenIdConnectLogoutAction(settings, restController, getLicenseState()),
                 new RestGetPrivilegesAction(settings, restController, getLicenseState()),
                 new RestPutPrivilegesAction(settings, restController, getLicenseState()),
                 new RestDeletePrivilegesAction(settings, restController, getLicenseState()),
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java
new file mode 100644
index 00000000000..1b4aff064a0
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.oidc;
+
+import com.nimbusds.oauth2.sdk.id.State;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateResponse;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateAction;
+import org.elasticsearch.xpack.core.security.authc.Authentication;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.security.authc.AuthenticationService;
+import org.elasticsearch.xpack.security.authc.TokenService;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectToken;
+
+import java.util.Map;
+
+public class TransportOpenIdConnectAuthenticateAction
+    extends HandledTransportAction<OpenIdConnectAuthenticateRequest, OpenIdConnectAuthenticateResponse> {
+
+    private final ThreadPool threadPool;
+    private final AuthenticationService authenticationService;
+    private final TokenService tokenService;
+
+    @Inject
+    public TransportOpenIdConnectAuthenticateAction(ThreadPool threadPool, TransportService transportService,
+                                                    ActionFilters actionFilters, AuthenticationService authenticationService,
+                                                    TokenService tokenService) {
+        super(OpenIdConnectAuthenticateAction.NAME, transportService, actionFilters,
+            (Writeable.Reader<OpenIdConnectAuthenticateRequest>) OpenIdConnectAuthenticateRequest::new);
+        this.threadPool = threadPool;
+        this.authenticationService = authenticationService;
+        this.tokenService = tokenService;
+    }
+
+    @Override
+    protected void doExecute(Task task, OpenIdConnectAuthenticateRequest request,
+                             ActionListener<OpenIdConnectAuthenticateResponse> listener) {
+        final OpenIdConnectToken token = new OpenIdConnectToken(request.getRedirectUri(), new State(request.getState()),
+            new Nonce(request.getNonce()));
+        final ThreadContext threadContext = threadPool.getThreadContext();
+        Authentication originatingAuthentication = Authentication.getAuthentication(threadContext);
+        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
+            authenticationService.authenticate(OpenIdConnectAuthenticateAction.NAME, request, token, ActionListener.wrap(
+                authentication -> {
+                    AuthenticationResult result = threadContext.getTransient(AuthenticationResult.THREAD_CONTEXT_KEY);
+                    if (result == null) {
+                        listener.onFailure(new IllegalStateException("Cannot find AuthenticationResult on thread context"));
+                        return;
+                    }
+                    @SuppressWarnings("unchecked") final Map<String, Object> tokenMetadata = (Map<String, Object>) result.getMetadata()
+                        .get(OpenIdConnectRealm.CONTEXT_TOKEN_DATA);
+                    tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMetadata, true,
+                        ActionListener.wrap(tuple -> {
+                            final String tokenString = tokenService.getAccessTokenAsString(tuple.v1());
+                            final TimeValue expiresIn = tokenService.getExpirationDelay();
+                            listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tokenString,
+                                tuple.v2(), expiresIn));
+                        }, listener::onFailure));
+                }, e -> {
+                    logger.debug(() -> new ParameterizedMessage("OpenIDConnectToken [{}] could not be authenticated", token), e);
+                    listener.onFailure(e);
+                }
+            ));
+        }
+    }
+}
+
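Aside (illustrative only, not part of the patch): a rough sketch of how the action handled above could be driven from a node client once it is registered in Security.java. The transport action only reads the redirect URI, state and nonce off the request, so the sketch assumes OpenIdConnectAuthenticateRequest has a no-arg constructor and matching setters; the response accessors are deliberately not used because their names are not shown in this patch.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateAction;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateRequest;

public class OidcAuthenticateSketch {
    // Hypothetical helper: exchange the OP redirect (code or implicit response) for Elasticsearch credentials.
    static void authenticate(Client client, String redirectUri, String state, String nonce) {
        OpenIdConnectAuthenticateRequest request = new OpenIdConnectAuthenticateRequest(); // assumed no-arg constructor
        request.setRedirectUri(redirectUri); // full URL the OP redirected the user agent to (assumed setter)
        request.setState(state);             // state stored with the user's session at the facilitator (assumed setter)
        request.setNonce(nonce);             // nonce generated for this authentication attempt (assumed setter)
        client.execute(OpenIdConnectAuthenticateAction.INSTANCE, request, ActionListener.wrap(
            response -> { /* carries principal, access token, refresh token and expiry, as built by the transport action */ },
            e -> { /* authentication against the OpenID Connect realm failed */ }));
    }
}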
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutAction.java
new file mode 100644
index 00000000000..fb1969f4fb0
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutAction.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.oidc;
+
+import com.nimbusds.jwt.JWT;
+import com.nimbusds.jwt.JWTParser;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutResponse;
+import org.elasticsearch.xpack.core.security.authc.Authentication;
+import org.elasticsearch.xpack.core.security.authc.Realm;
+import org.elasticsearch.xpack.core.security.authc.support.TokensInvalidationResult;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.authc.Realms;
+import org.elasticsearch.xpack.security.authc.TokenService;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm;
+
+import java.text.ParseException;
+import java.util.Map;
+
+/**
+ * Transport action responsible for generating an OpenID Connect logout request to be sent to an OpenID Connect Provider
+ */
+public class TransportOpenIdConnectLogoutAction extends HandledTransportAction<OpenIdConnectLogoutRequest, OpenIdConnectLogoutResponse> {
+
+    private final Realms realms;
+    private final TokenService tokenService;
+    private static final Logger logger = LogManager.getLogger(TransportOpenIdConnectLogoutAction.class);
+
+    @Inject
+    public TransportOpenIdConnectLogoutAction(TransportService transportService, ActionFilters actionFilters, Realms realms,
+                                              TokenService tokenService) {
+        super(OpenIdConnectLogoutAction.NAME, transportService, actionFilters,
+            (Writeable.Reader<OpenIdConnectLogoutRequest>) OpenIdConnectLogoutRequest::new);
+        this.realms = realms;
+        this.tokenService = tokenService;
+    }
+
+    @Override
+    protected void doExecute(Task task, OpenIdConnectLogoutRequest request, ActionListener<OpenIdConnectLogoutResponse> listener) {
+        invalidateRefreshToken(request.getRefreshToken(), ActionListener.wrap(ignore -> {
+            final String token = request.getToken();
+            tokenService.getAuthenticationAndMetaData(token, ActionListener.wrap(
+                tuple -> {
+                    final Authentication authentication = tuple.v1();
+                    final Map<String, Object> tokenMetadata = tuple.v2();
+                    validateAuthenticationAndMetadata(authentication, tokenMetadata);
+                    tokenService.invalidateAccessToken(token, ActionListener.wrap(
+                        result -> {
+                            if (logger.isTraceEnabled()) {
+                                logger.trace("OpenID Connect Logout for user [{}] and token [{}...{}]",
+                                    authentication.getUser().principal(),
+                                    token.substring(0, 8),
+                                    token.substring(token.length() - 8));
+                            }
+                            OpenIdConnectLogoutResponse response = buildResponse(authentication, tokenMetadata);
+                            listener.onResponse(response);
+                        }, listener::onFailure)
+                    );
+                }, listener::onFailure));
+        }, listener::onFailure));
+    }
+
+    private OpenIdConnectLogoutResponse buildResponse(Authentication authentication, Map<String, Object> tokenMetadata) {
+        final String idTokenHint = (String) getFromMetadata(tokenMetadata, "id_token_hint");
+        final Realm realm = this.realms.realm(authentication.getAuthenticatedBy().getName());
+        final JWT idToken;
+        try {
+            idToken = JWTParser.parse(idTokenHint);
+        } catch (ParseException e) {
+            throw new ElasticsearchSecurityException("Token Metadata did not contain a valid IdToken", e);
+        }
+        return ((OpenIdConnectRealm) realm).buildLogoutResponse(idToken);
+    }
+
+    private void validateAuthenticationAndMetadata(Authentication authentication, Map<String, Object> tokenMetadata) {
+        if (tokenMetadata == null) {
+            throw new ElasticsearchSecurityException("Authentication did not contain metadata");
+        }
+        if (authentication == null) {
+            throw new ElasticsearchSecurityException("No active authentication");
+        }
+        final User user = authentication.getUser();
+        if (user == null) {
+            throw new ElasticsearchSecurityException("No active user");
+        }
+
+        final Authentication.RealmRef ref = authentication.getAuthenticatedBy();
+        if (ref == null || Strings.isNullOrEmpty(ref.getName())) {
+            throw new ElasticsearchSecurityException("Authentication {} has no authenticating realm",
+                authentication);
+        }
+        final Realm realm = this.realms.realm(authentication.getAuthenticatedBy().getName());
+        if (realm == null) {
+            throw new ElasticsearchSecurityException("Authenticating realm {} does not exist", ref.getName());
+        }
+        if (realm instanceof OpenIdConnectRealm == false) {
+            throw new IllegalArgumentException("Access token is not valid for an OpenID Connect realm");
+        }
+    }
+
+    private Object getFromMetadata(Map<String, Object> metadata, String key) {
+        if (metadata.containsKey(key) == false) {
+            throw new ElasticsearchSecurityException("Authentication token does not have OpenID Connect metadata [{}]", key);
+        }
+        Object value = metadata.get(key);
+        if (null != value && value instanceof String == false) {
+            throw new ElasticsearchSecurityException("In authentication token, OpenID Connect metadata [{}] is [{}] rather than " +
+                "String", key, value.getClass());
+        }
+        return value;
+    }
+
+    private void invalidateRefreshToken(String refreshToken, ActionListener<TokensInvalidationResult> listener) {
+        if (refreshToken == null) {
+            listener.onResponse(null);
+        } else {
+            tokenService.invalidateRefreshToken(refreshToken, listener);
+        }
+    }
+}
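Likewise illustrative only: the logout action invalidates the given refresh token (if any) and the access token, and answers with the OP end-session redirect that OpenIdConnectRealm#buildLogoutResponse derives from the id_token_hint stored in the token's metadata. The request setters below are assumed names, not taken from this patch.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutAction;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutRequest;

public class OidcLogoutSketch {
    static void logout(Client client, String accessToken, String refreshToken) {
        OpenIdConnectLogoutRequest request = new OpenIdConnectLogoutRequest(); // assumed no-arg constructor
        request.setToken(accessToken);         // access token issued during OIDC authentication (assumed setter)
        request.setRefreshToken(refreshToken); // may be null; then only the access token is invalidated (assumed setter)
        client.execute(OpenIdConnectLogoutAction.INSTANCE, request, ActionListener.wrap(
            response -> { /* wraps the RP-initiated logout (end-session) URI built by the realm */ },
            e -> { /* token did not belong to an OpenID Connect realm, or invalidation failed */ }));
    }
}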
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java
new file mode 100644
index 00000000000..652daf18f53
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.oidc;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationResponse;
+import org.elasticsearch.xpack.core.security.authc.Realm;
+import org.elasticsearch.xpack.security.authc.Realms;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class TransportOpenIdConnectPrepareAuthenticationAction extends HandledTransportAction<OpenIdConnectPrepareAuthenticationRequest,
+    OpenIdConnectPrepareAuthenticationResponse> {
+
+    private final Realms realms;
+
+    @Inject
+    public TransportOpenIdConnectPrepareAuthenticationAction(TransportService transportService,
+                                                             ActionFilters actionFilters, Realms realms) {
+        super(OpenIdConnectPrepareAuthenticationAction.NAME, transportService, actionFilters,
+            (Writeable.Reader<OpenIdConnectPrepareAuthenticationRequest>) OpenIdConnectPrepareAuthenticationRequest::new);
+        this.realms = realms;
+    }
+
+    @Override
+    protected void doExecute(Task task, OpenIdConnectPrepareAuthenticationRequest request,
+                             ActionListener<OpenIdConnectPrepareAuthenticationResponse> listener) {
+        Realm realm = null;
+        if (Strings.hasText(request.getIssuer())) {
+            List<OpenIdConnectRealm> matchingRealms = this.realms.stream()
+                .filter(r -> r instanceof OpenIdConnectRealm && ((OpenIdConnectRealm) r).isIssuerValid(request.getIssuer()))
+                .map(r -> (OpenIdConnectRealm) r)
+                .collect(Collectors.toList());
+            if (matchingRealms.isEmpty()) {
+                listener.onFailure(
+                    new ElasticsearchSecurityException("Cannot find OpenID Connect realm with issuer [{}]", request.getIssuer()));
+            } else if (matchingRealms.size() > 1) {
+                listener.onFailure(
+                    new ElasticsearchSecurityException("Found multiple OpenID Connect realms with issuer [{}]", request.getIssuer()));
+            } else {
+                realm = matchingRealms.get(0);
+            }
+        } else if (Strings.hasText(request.getRealmName())) {
+            realm = this.realms.realm(request.getRealmName());
+        }
+
+        if (realm instanceof OpenIdConnectRealm) {
+            prepareAuthenticationResponse((OpenIdConnectRealm) realm, request.getState(), request.getNonce(), request.getLoginHint(),
+                listener);
+        } else {
+            listener.onFailure(
+                new ElasticsearchSecurityException("Cannot find OpenID Connect realm with name [{}]", request.getRealmName()));
+        }
+    }
+
+    private void prepareAuthenticationResponse(OpenIdConnectRealm realm, String state, String nonce, String loginHint,
+                                               ActionListener<OpenIdConnectPrepareAuthenticationResponse> listener) {
+        try {
+            final OpenIdConnectPrepareAuthenticationResponse authenticationResponse =
+                realm.buildAuthenticationRequestUri(state, nonce, loginHint);
+            listener.onResponse(authenticationResponse);
+        } catch (ElasticsearchException e) {
+            listener.onFailure(e);
+        }
+    }
+}
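In the same illustrative spirit: the prepare step resolves the realm either by name or by issuer and returns the authentication request URI, plus the state and nonce that the facilitator must remember for the later authenticate call. Setter names are assumed.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationAction;
import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationRequest;

public class OidcPrepareAuthenticationSketch {
    static void prepare(Client client, String realmName) {
        OpenIdConnectPrepareAuthenticationRequest request = new OpenIdConnectPrepareAuthenticationRequest(); // assumed
        request.setRealmName(realmName); // alternatively setIssuer(...) to resolve the realm by issuer (assumed setters)
        client.execute(OpenIdConnectPrepareAuthenticationAction.INSTANCE, request, ActionListener.wrap(
            response -> { /* carries the authentication request URI to redirect the user to, plus state and nonce */ },
            e -> { /* no realm, or more than one realm, matched the given name or issuer */ }));
    }
}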
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
index 54bffd8a215..66206d50137 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
@@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
@@ -27,6 +28,7 @@ import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
 import org.elasticsearch.xpack.security.authc.file.FileRealm;
 import org.elasticsearch.xpack.security.authc.kerberos.KerberosRealm;
 import org.elasticsearch.xpack.security.authc.ldap.LdapRealm;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm;
 import org.elasticsearch.xpack.security.authc.pki.PkiRealm;
 import org.elasticsearch.xpack.security.authc.saml.SamlRealm;
 import org.elasticsearch.xpack.security.authc.support.RoleMappingFileBootstrapCheck;
@@ -45,6 +47,7 @@ import java.util.stream.Collectors;
 /**
  * Provides a single entry point into dealing with all standard XPack security {@link Realm realms}.
  * This class does not handle extensions.
+ *
  * @see Realms for the component that manages configured realms (including custom extension realms)
  */
 public final class InternalRealms {
@@ -53,15 +56,16 @@ public final class InternalRealms {
      * The list of all <em>internal</em> realm types, excluding {@link ReservedRealm#TYPE}.
      */
     private static final Set<String> XPACK_TYPES = Collections
-            .unmodifiableSet(Sets.newHashSet(NativeRealmSettings.TYPE, FileRealmSettings.TYPE, LdapRealmSettings.AD_TYPE,
-                    LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE, SamlRealmSettings.TYPE, KerberosRealmSettings.TYPE));
+        .unmodifiableSet(Sets.newHashSet(NativeRealmSettings.TYPE, FileRealmSettings.TYPE, LdapRealmSettings.AD_TYPE,
+            LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE, SamlRealmSettings.TYPE, KerberosRealmSettings.TYPE,
+            OpenIdConnectRealmSettings.TYPE));
 
     /**
      * The list of all standard realm types, which are those provided by x-pack and do not have extensive
      * interaction with third party sources
      */
     private static final Set<String> STANDARD_TYPES = Collections.unmodifiableSet(Sets.newHashSet(NativeRealmSettings.TYPE,
-            FileRealmSettings.TYPE, LdapRealmSettings.AD_TYPE, LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE));
+        FileRealmSettings.TYPE, LdapRealmSettings.AD_TYPE, LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE));
 
     /**
      * Determines whether <code>type</code> is an internal realm-type that is provided by x-pack,
@@ -90,6 +94,7 @@ public final class InternalRealms {
     /**
      * Creates {@link Realm.Factory factories} for each <em>internal</em> realm type.
      * This excludes the {@link ReservedRealm}, as it cannot be created dynamically.
+     *
      * @return A map from <em>realm-type</em> to <code>Factory</code>
      */
     public static Map<String, Realm.Factory> getFactories(ThreadPool threadPool, ResourceWatcherService resourceWatcherService,
@@ -105,12 +110,14 @@ public final class InternalRealms {
             return nativeRealm;
         });
         map.put(LdapRealmSettings.AD_TYPE, config -> new LdapRealm(config, sslService,
-                resourceWatcherService, nativeRoleMappingStore, threadPool));
+            resourceWatcherService, nativeRoleMappingStore, threadPool));
         map.put(LdapRealmSettings.LDAP_TYPE, config -> new LdapRealm(config,
-                sslService, resourceWatcherService, nativeRoleMappingStore, threadPool));
+            sslService, resourceWatcherService, nativeRoleMappingStore, threadPool));
         map.put(PkiRealmSettings.TYPE, config -> new PkiRealm(config, resourceWatcherService, nativeRoleMappingStore));
         map.put(SamlRealmSettings.TYPE, config -> SamlRealm.create(config, sslService, resourceWatcherService, nativeRoleMappingStore));
         map.put(KerberosRealmSettings.TYPE, config -> new KerberosRealm(config, nativeRoleMappingStore, threadPool));
+        map.put(OpenIdConnectRealmSettings.TYPE, config -> new OpenIdConnectRealm(config, sslService, nativeRoleMappingStore,
+            resourceWatcherService));
         return Collections.unmodifiableMap(map);
     }
 
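For orientation only: the factory registered above is what the security plugin's realm bootstrapping calls when it finds a realm of the new OpenID Connect type in the node settings. A minimal sketch, assuming Realm.Factory exposes a single create(RealmConfig) method that may throw (as the other factory lambdas in this map suggest):

import java.util.Map;
import org.elasticsearch.xpack.core.security.authc.Realm;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;

public class OidcRealmFactorySketch {
    // factories is the map returned by InternalRealms.getFactories(...)
    static Realm createOidcRealm(Map<String, Realm.Factory> factories, RealmConfig config) throws Exception {
        Realm.Factory factory = factories.get(OpenIdConnectRealmSettings.TYPE); // the realm type added by this patch
        return factory.create(config); // delegates to new OpenIdConnectRealm(config, sslService, roleMappingStore, watcherService)
    }
}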
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java
new file mode 100644
index 00000000000..32cffc80071
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java
@@ -0,0 +1,722 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.jose.JOSEException;
+import com.nimbusds.jose.JWSAlgorithm;
+import com.nimbusds.jose.jwk.JWK;
+import com.nimbusds.jose.jwk.JWKSelector;
+import com.nimbusds.jose.jwk.JWKSet;
+import com.nimbusds.jose.jwk.source.JWKSource;
+import com.nimbusds.jose.proc.BadJOSEException;
+import com.nimbusds.jose.proc.JWSVerificationKeySelector;
+import com.nimbusds.jose.proc.SecurityContext;
+import com.nimbusds.jose.util.IOUtils;
+import com.nimbusds.jwt.JWT;
+import com.nimbusds.jwt.JWTClaimsSet;
+import com.nimbusds.oauth2.sdk.AuthorizationCode;
+import com.nimbusds.oauth2.sdk.AuthorizationCodeGrant;
+import com.nimbusds.oauth2.sdk.ErrorObject;
+import com.nimbusds.oauth2.sdk.ResponseType;
+import com.nimbusds.oauth2.sdk.TokenErrorResponse;
+import com.nimbusds.oauth2.sdk.auth.Secret;
+import com.nimbusds.oauth2.sdk.id.State;
+import com.nimbusds.oauth2.sdk.token.AccessToken;
+import com.nimbusds.oauth2.sdk.token.BearerTokenError;
+import com.nimbusds.oauth2.sdk.util.JSONObjectUtils;
+import com.nimbusds.openid.connect.sdk.AuthenticationErrorResponse;
+import com.nimbusds.openid.connect.sdk.AuthenticationResponse;
+import com.nimbusds.openid.connect.sdk.AuthenticationResponseParser;
+import com.nimbusds.openid.connect.sdk.AuthenticationSuccessResponse;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import com.nimbusds.openid.connect.sdk.OIDCTokenResponse;
+import com.nimbusds.openid.connect.sdk.claims.AccessTokenHash;
+import com.nimbusds.openid.connect.sdk.token.OIDCTokens;
+import com.nimbusds.openid.connect.sdk.validators.AccessTokenValidator;
+import com.nimbusds.openid.connect.sdk.validators.IDTokenValidator;
+import net.minidev.json.JSONObject;
+import org.apache.commons.codec.Charsets;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.NameValuePair;
+import org.apache.http.auth.AuthenticationException;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.entity.UrlEncodedFormEntity;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.concurrent.FutureCallback;
+import org.apache.http.config.Registry;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.conn.ssl.DefaultHostnameVerifier;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.entity.ContentType;
+import org.apache.http.impl.auth.BasicScheme;
+import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
+import org.apache.http.impl.nio.client.HttpAsyncClients;
+import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager;
+import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor;
+import org.apache.http.message.BasicNameValuePair;
+import org.apache.http.nio.conn.NoopIOSessionStrategy;
+import org.apache.http.nio.conn.SchemeIOSessionStrategy;
+import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
+import org.apache.http.nio.reactor.ConnectingIOReactor;
+import org.apache.http.util.EntityUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.SpecialPermission;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.CheckedRunnable;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.ListenableFuture;
+import org.elasticsearch.watcher.FileChangesListener;
+import org.elasticsearch.watcher.FileWatcher;
+import org.elasticsearch.watcher.ResourceWatcherService;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmSettings;
+import org.elasticsearch.xpack.core.ssl.SSLConfiguration;
+import org.elasticsearch.xpack.core.ssl.SSLService;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.ALLOWED_CLOCK_SKEW;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.HTTP_CONNECT_TIMEOUT;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.HTTP_CONNECTION_READ_TIMEOUT;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.HTTP_MAX_CONNECTIONS;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.HTTP_MAX_ENDPOINT_CONNECTIONS;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.HTTP_SOCKET_TIMEOUT;
+
+/**
+ * Handles an OpenID Connect Authentication response as received by the facilitator. In the case of an implicit flow, validates
+ * the ID Token and extracts the Elasticsearch user properties from it. In the case of an authorization code flow, it first
+ * exchanges the code in the authentication response for an ID Token at the token endpoint of the OpenID Connect Provider.
+ */
+public class OpenIdConnectAuthenticator {
+
+    private final RealmConfig realmConfig;
+    private final OpenIdConnectProviderConfiguration opConfig;
+    private final RelyingPartyConfiguration rpConfig;
+    private final SSLService sslService;
+    private AtomicReference<IDTokenValidator> idTokenValidator = new AtomicReference<>();
+    private final CloseableHttpAsyncClient httpClient;
+    private final ResourceWatcherService watcherService;
+
+    private static final Logger LOGGER = LogManager.getLogger(OpenIdConnectAuthenticator.class);
+
+    public OpenIdConnectAuthenticator(RealmConfig realmConfig, OpenIdConnectProviderConfiguration opConfig,
+                                      RelyingPartyConfiguration rpConfig, SSLService sslService, ResourceWatcherService watcherService) {
+        this.realmConfig = realmConfig;
+        this.opConfig = opConfig;
+        this.rpConfig = rpConfig;
+        this.sslService = sslService;
+        this.httpClient = createHttpClient();
+        this.watcherService = watcherService;
+        this.idTokenValidator.set(createIdTokenValidator());
+    }
+
+    // For testing
+    OpenIdConnectAuthenticator(RealmConfig realmConfig, OpenIdConnectProviderConfiguration opConfig, RelyingPartyConfiguration rpConfig,
+                               SSLService sslService, IDTokenValidator idTokenValidator, ResourceWatcherService watcherService) {
+        this.realmConfig = realmConfig;
+        this.opConfig = opConfig;
+        this.rpConfig = rpConfig;
+        this.sslService = sslService;
+        this.httpClient = createHttpClient();
+        this.idTokenValidator.set(idTokenValidator);
+        this.watcherService = watcherService;
+    }
+
+    /**
+     * Processes an OpenID Connect Response to an Authentication Request. The response comes in the form of a URL that carries the
+     * necessary parameters and is contained in the provided Token. If the response is valid, it calls the provided listener with a
+     * set of OpenID Connect claims that identify the authenticated user. If the UserInfo endpoint is specified in the configuration,
+     * we attempt to make a UserInfo request and add the returned claims to the Id Token claims.
+     *
+     * @param token    The OpenIdConnectToken to consume
+     * @param listener The listener to notify with the resolved {@link JWTClaimsSet}
+     */
+    public void authenticate(OpenIdConnectToken token, final ActionListener<JWTClaimsSet> listener) {
+        try {
+            AuthenticationResponse authenticationResponse = AuthenticationResponseParser.parse(new URI(token.getRedirectUrl()));
+            final Nonce expectedNonce = token.getNonce();
+            State expectedState = token.getState();
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("OpenID Connect Provider redirected user to [{}]. Expected Nonce is [{}] and expected State is [{}]",
+                    token.getRedirectUrl(), expectedNonce, expectedState);
+            }
+            if (authenticationResponse instanceof AuthenticationErrorResponse) {
+                ErrorObject error = ((AuthenticationErrorResponse) authenticationResponse).getErrorObject();
+                listener.onFailure(new ElasticsearchSecurityException("OpenID Connect Provider response indicates authentication failure. " +
+                    "Code=[{}], Description=[{}]", error.getCode(), error.getDescription()));
+                return;
+            }
+            final AuthenticationSuccessResponse response = authenticationResponse.toSuccessResponse();
+            validateState(expectedState, response.getState());
+            validateResponseType(response);
+            if (rpConfig.getResponseType().impliesCodeFlow()) {
+                final AuthorizationCode code = response.getAuthorizationCode();
+                exchangeCodeForToken(code, ActionListener.wrap(tokens -> {
+                    final AccessToken accessToken = tokens.v1();
+                    final JWT idToken = tokens.v2();
+                    validateAccessToken(accessToken, idToken);
+                    getUserClaims(accessToken, idToken, expectedNonce, true, listener);
+                }, listener::onFailure));
+            } else {
+                final JWT idToken = response.getIDToken();
+                final AccessToken accessToken = response.getAccessToken();
+                validateAccessToken(accessToken, idToken);
+                getUserClaims(accessToken, idToken, expectedNonce, true, listener);
+            }
+        } catch (ElasticsearchSecurityException e) {
+            // Don't wrap in a new ElasticsearchSecurityException
+            listener.onFailure(e);
+        } catch (Exception e) {
+            listener.onFailure(new ElasticsearchSecurityException("Failed to consume the OpenID connect response. ", e));
+        }
+    }
+
+    /**
+     * Collects all the user claims we can get for the authenticated user. This happens in two steps:
+     * <ul>
+     * <li>First we attempt to validate the Id Token we have received and get any claims it contains</li>
+     * <li>If we have received an Access Token and the UserInfo endpoint is configured, we also attempt to get the user info response
+     * from there and parse the returned claims,
+     * see {@link OpenIdConnectAuthenticator#getAndCombineUserInfoClaims(AccessToken, JWTClaimsSet, ActionListener)}</li>
+     * </ul>
+     *
+     * @param accessToken    The {@link AccessToken} that the OP has issued for this user
+     * @param idToken        The {@link JWT} Id Token that the OP has issued for this user
+     * @param expectedNonce  The nonce value we sent in the authentication request and that should be contained in the Id Token
+     * @param shouldRetry    Whether to retry validation once, after reloading a remote JWK set, if it fails with a BadJOSEException
+     * @param claimsListener The listener to notify with the resolved {@link JWTClaimsSet}
+     */
+    private void getUserClaims(@Nullable AccessToken accessToken, JWT idToken, Nonce expectedNonce, boolean shouldRetry,
+                               ActionListener<JWTClaimsSet> claimsListener) {
+        try {
+            JWTClaimsSet verifiedIdTokenClaims = idTokenValidator.get().validate(idToken, expectedNonce).toJWTClaimsSet();
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Received and validated the Id Token for the user: [{}]", verifiedIdTokenClaims);
+            }
+            // Add the Id Token string as a synthetic claim
+            final JSONObject verifiedIdTokenClaimsObject = verifiedIdTokenClaims.toJSONObject();
+            final JWTClaimsSet idTokenClaim = new JWTClaimsSet.Builder().claim("id_token_hint", idToken.serialize()).build();
+            verifiedIdTokenClaimsObject.merge(idTokenClaim.toJSONObject());
+            final JWTClaimsSet enrichedVerifiedIdTokenClaims = JWTClaimsSet.parse(verifiedIdTokenClaimsObject);
+            if (accessToken != null && opConfig.getUserinfoEndpoint() != null) {
+                getAndCombineUserInfoClaims(accessToken, enrichedVerifiedIdTokenClaims, claimsListener);
+            } else {
+                if (accessToken == null && opConfig.getUserinfoEndpoint() != null) {
+                    LOGGER.debug("UserInfo endpoint is configured but the OP didn't return an access token so we can't query it");
+                } else if (accessToken != null && opConfig.getUserinfoEndpoint() == null) {
+                    LOGGER.debug("OP returned an access token but the UserInfo endpoint is not configured.");
+                }
+                claimsListener.onResponse(enrichedVerifiedIdTokenClaims);
+            }
+        } catch (BadJOSEException e) {
+            // We only try to update the cached JWK set once if a remote source is used and
+            // RSA or ECDSA is used for signatures
+            if (shouldRetry
+                && JWSAlgorithm.Family.HMAC_SHA.contains(rpConfig.getSignatureAlgorithm()) == false
+                && opConfig.getJwkSetPath().startsWith("https://")) {
+                ((ReloadableJWKSource) ((JWSVerificationKeySelector) idTokenValidator.get().getJWSKeySelector()).getJWKSource())
+                    .triggerReload(ActionListener.wrap(v -> {
+                        getUserClaims(accessToken, idToken, expectedNonce, false, claimsListener);
+                    }, ex -> {
+                        LOGGER.trace("Attempted and failed to refresh JWK cache upon token validation failure", e);
+                        claimsListener.onFailure(ex);
+                    }));
+            } else {
+                claimsListener.onFailure(new ElasticsearchSecurityException("Failed to parse or validate the ID Token", e));
+            }
+        } catch (com.nimbusds.oauth2.sdk.ParseException | ParseException | JOSEException e) {
+            claimsListener.onFailure(new ElasticsearchSecurityException("Failed to parse or validate the ID Token", e));
+        }
+    }
+
+    /**
+     * Validates an access token according to the
+     * <a href="https://openid.net/specs/openid-connect-core-1_0.html#ImplicitTokenValidation">specification</a>.
+     * <p>
+     * When using the authorization code flow the OP might not provide the at_hash parameter in the
+     * Id Token as allowed in the specification. In such a case we can't validate the access token
+     * but this is considered safe as it was received in a back channel communication that was protected
+     * by TLS. Also when using the implicit flow with the response type set to "id_token", no Access
+     * by TLS. Also, when using the implicit flow with the response type set to "id_token", no Access
+     * Token will be returned from the OP.
+     * @param accessToken The Access Token to validate. Can be null when the configured response type is "id_token"
+     * @param idToken     The Id Token that was received in the same response
+     */
+    private void validateAccessToken(AccessToken accessToken, JWT idToken) {
+        try {
+            if (rpConfig.getResponseType().equals(ResponseType.parse("id_token token")) ||
+                rpConfig.getResponseType().equals(ResponseType.parse("code"))) {
+                assert (accessToken != null) : "Access Token cannot be null for Response Type " + rpConfig.getResponseType().toString();
+                final boolean isValidationOptional = rpConfig.getResponseType().equals(ResponseType.parse("code"));
+                // only "Bearer" is defined in the specification but check just in case
+                if (accessToken.getType().toString().equals("Bearer") == false) {
+                    throw new ElasticsearchSecurityException("Invalid access token type [{}], while [Bearer] was expected",
+                        accessToken.getType());
+                }
+                String atHashValue = idToken.getJWTClaimsSet().getStringClaim("at_hash");
+                if (Strings.hasText(atHashValue) == false) {
+                    if (isValidationOptional == false) {
+                        throw new ElasticsearchSecurityException("Failed to verify access token. ID Token doesn't contain the at_hash claim");
+                    }
+                } else {
+                    AccessTokenHash atHash = new AccessTokenHash(atHashValue);
+                    JWSAlgorithm jwsAlgorithm = JWSAlgorithm.parse(idToken.getHeader().getAlgorithm().getName());
+                    AccessTokenValidator.validate(accessToken, jwsAlgorithm, atHash);
+                }
+            } else if (rpConfig.getResponseType().equals(ResponseType.parse("id_token")) && accessToken != null) {
+                // This should NOT happen and indicates a misconfigured OP. Warn the user but do not fail
+                LOGGER.warn("Access Token incorrectly returned from the OpenID Connect Provider while using \"id_token\" response type.");
+            }
+        } catch (Exception e) {
+            throw new ElasticsearchSecurityException("Failed to verify access token.", e);
+        }
+    }
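
    // Illustrative sketch only (not part of this file): roughly what the AccessTokenValidator.validate call above checks.
    // Per OpenID Connect Core, at_hash is the base64url encoding of the left-most half of the hash of the access token's
    // ASCII octets, using the hash function tied to the ID Token's JWS "alg" (SHA-256 is assumed here, as for RS256/ES256).
    static boolean atHashMatches(String accessTokenValue, String atHashClaim) throws java.security.NoSuchAlgorithmException {
        byte[] digest = java.security.MessageDigest.getInstance("SHA-256")
            .digest(accessTokenValue.getBytes(java.nio.charset.StandardCharsets.US_ASCII));
        byte[] leftHalf = java.util.Arrays.copyOfRange(digest, 0, digest.length / 2);
        String expected = java.util.Base64.getUrlEncoder().withoutPadding().encodeToString(leftHalf);
        return expected.equals(atHashClaim);
    }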
+
+    /**
+     * Reads and parses a JWKSet from a file
+     *
+     * @param jwkSetPath The path to the file that contains the JWKs as a string.
+     * @return the parsed {@link JWKSet}
+     * @throws ParseException if the file cannot be parsed
+     * @throws IOException    if the file cannot be read
+     */
+    @SuppressForbidden(reason = "uses toFile")
+    private JWKSet readJwkSetFromFile(String jwkSetPath) throws IOException, ParseException {
+        final Path path = realmConfig.env().configFile().resolve(jwkSetPath);
+        return JWKSet.load(path.toFile());
+    }
+
+    /**
+     * Validate that the response we received corresponds to the response type we requested
+     *
+     * @param response The {@link AuthenticationSuccessResponse} we received
+     * @throws ElasticsearchSecurityException if the response is not the expected one for the configured response type
+     */
+    private void validateResponseType(AuthenticationSuccessResponse response) {
+        if (rpConfig.getResponseType().equals(response.impliedResponseType()) == false) {
+            throw new ElasticsearchSecurityException("Unexpected response type [{}], while [{}] is configured",
+                response.impliedResponseType(), rpConfig.getResponseType());
+        }
+    }
+
+    /**
+     * Validate that the state parameter the response contained corresponds to the one that we generated in the
+     * beginning of this authentication attempt and was stored with the user's session at the facilitator
+     *
+     * @param expectedState The state that was originally generated
+     * @param state         The state that was contained in the response
+     */
+    private void validateState(State expectedState, State state) {
+        if (null == state) {
+            throw new ElasticsearchSecurityException("Failed to validate the response, the response did not contain a state parameter");
+        } else if (null == expectedState) {
+            throw new ElasticsearchSecurityException("Failed to validate the response, the user's session did not contain a state " +
+                "parameter");
+        } else if (state.equals(expectedState) == false) {
+            throw new ElasticsearchSecurityException("Invalid state parameter [{}], while [{}] was expected", state, expectedState);
+        }
+    }
+
+    /**
+     * Attempts to make a request to the UserInfo Endpoint of the OpenID Connect provider
+     */
+    private void getAndCombineUserInfoClaims(AccessToken accessToken, JWTClaimsSet verifiedIdTokenClaims,
+                                             ActionListener<JWTClaimsSet> claimsListener) {
+        try {
+            final HttpGet httpGet = new HttpGet(opConfig.getUserinfoEndpoint());
+            httpGet.setHeader("Authorization", "Bearer " + accessToken.getValue());
+            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+                httpClient.execute(httpGet, new FutureCallback<HttpResponse>() {
+                    @Override
+                    public void completed(HttpResponse result) {
+                        handleUserinfoResponse(result, verifiedIdTokenClaims, claimsListener);
+                    }
+
+                    @Override
+                    public void failed(Exception ex) {
+                        claimsListener.onFailure(new ElasticsearchSecurityException("Failed to get claims from the Userinfo Endpoint.",
+                            ex));
+                    }
+
+                    @Override
+                    public void cancelled() {
+                        claimsListener.onFailure(
+                            new ElasticsearchSecurityException("Failed to get claims from the Userinfo Endpoint. Request was cancelled"));
+                    }
+                });
+                return null;
+            });
+        } catch (Exception e) {
+            claimsListener.onFailure(new ElasticsearchSecurityException("Failed to get user information from the UserInfo endpoint.", e));
+        }
+    }
+
+    /**
+     * Handle the UserInfo Response from the OpenID Connect Provider. If successful, merge the returned claims with the claims
+     * of the Id Token and call the provided listener.
+     */
+    private void handleUserinfoResponse(HttpResponse httpResponse, JWTClaimsSet verifiedIdTokenClaims,
+                                        ActionListener<JWTClaimsSet> claimsListener) {
+        try {
+            final HttpEntity entity = httpResponse.getEntity();
+            final Header encodingHeader = entity.getContentEncoding();
+            final Charset encoding = encodingHeader == null ? StandardCharsets.UTF_8 : Charsets.toCharset(encodingHeader.getValue());
+            final Header contentHeader = entity.getContentType();
+            final String contentAsString = EntityUtils.toString(entity, encoding);
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Received UserInfo Response from OP with status [{}] and content [{}] ",
+                    httpResponse.getStatusLine().getStatusCode(), contentAsString);
+            }
+            if (httpResponse.getStatusLine().getStatusCode() == 200) {
+                if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/json")) {
+                    final JWTClaimsSet userInfoClaims = JWTClaimsSet.parse(contentAsString);
+                    if (LOGGER.isTraceEnabled()) {
+                        LOGGER.trace("Successfully retrieved user information: [{}]", userInfoClaims.toJSONObject().toJSONString());
+                    }
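+                    // Merge the UserInfo claims into the already verified ID Token claims before notifying the listener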
+                    final JSONObject combinedClaims = verifiedIdTokenClaims.toJSONObject();
+                    combinedClaims.merge(userInfoClaims.toJSONObject());
+                    claimsListener.onResponse(JWTClaimsSet.parse(combinedClaims));
+                } else if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/jwt")) {
+                    //TODO Handle validating possibly signed responses
+                    claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Signed/encryopted JWTs are" +
+                        "not currently supported"));
+                } else {
+                    claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Content type was expected to " +
+                        "be [application/json] or [appliation/jwt] but was [" + contentHeader.getValue() + "]"));
+                }
+            } else {
+                final Header wwwAuthenticateHeader = httpResponse.getFirstHeader("WWW-Authenticate");
+                if (wwwAuthenticateHeader != null && Strings.hasText(wwwAuthenticateHeader.getValue())) {
+                    BearerTokenError error = BearerTokenError.parse(wwwAuthenticateHeader.getValue());
+                    claimsListener.onFailure(
+                        new ElasticsearchSecurityException("Failed to get user information from the UserInfo endpoint. Code=[{}], " +
+                            "Description=[{}]", error.getCode(), error.getDescription()));
+                } else {
+                    claimsListener.onFailure(
+                        new ElasticsearchSecurityException("Failed to get user information from the UserInfo endpoint. Code=[{}], " +
+                            "Description=[{}]", httpResponse.getStatusLine().getStatusCode(),
+                            httpResponse.getStatusLine().getReasonPhrase()));
+                }
+            }
+        } catch (IOException | com.nimbusds.oauth2.sdk.ParseException | ParseException e) {
+            claimsListener.onFailure(new ElasticsearchSecurityException("Failed to get user information from the UserInfo endpoint.",
+                e));
+        }
+    }
+
+    /**
+     * Attempts to make a request to the Token Endpoint of the OpenID Connect provider in order to exchange an
+     * authorization code for an Id Token (and potentially an Access Token)
+     */
+    private void exchangeCodeForToken(AuthorizationCode code, ActionListener<Tuple<AccessToken, JWT>> tokensListener) {
+        try {
+            final AuthorizationCodeGrant codeGrant = new AuthorizationCodeGrant(code, rpConfig.getRedirectUri());
+            final HttpPost httpPost = new HttpPost(opConfig.getTokenEndpoint());
+            final List<NameValuePair> params = new ArrayList<>();
+            for (Map.Entry<String, List<String>> entry : codeGrant.toParameters().entrySet()) {
+                // All parameters of AuthorizationCodeGrant are singleton lists
+                params.add(new BasicNameValuePair(entry.getKey(), entry.getValue().get(0)));
+            }
+            httpPost.setEntity(new UrlEncodedFormEntity(params));
+            httpPost.setHeader("Content-type", "application/x-www-form-urlencoded");
+            UsernamePasswordCredentials creds = new UsernamePasswordCredentials(rpConfig.getClientId().getValue(),
+                rpConfig.getClientSecret().toString());
+            httpPost.addHeader(new BasicScheme().authenticate(creds, httpPost, null));
+            SpecialPermission.check();
+            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+                httpClient.execute(httpPost, new FutureCallback<HttpResponse>() {
+                    @Override
+                    public void completed(HttpResponse result) {
+                        handleTokenResponse(result, tokensListener);
+                    }
+
+                    @Override
+                    public void failed(Exception ex) {
+                        tokensListener.onFailure(
+                            new ElasticsearchSecurityException("Failed to exchange code for Id Token using the Token Endpoint.", ex));
+                    }
+
+                    @Override
+                    public void cancelled() {
+                        final String message = "Failed to exchange code for Id Token using the Token Endpoint. Request was cancelled";
+                        tokensListener.onFailure(new ElasticsearchSecurityException(message));
+                    }
+                });
+                return null;
+            });
+        } catch (AuthenticationException | UnsupportedEncodingException e) {
+            tokensListener.onFailure(
+                new ElasticsearchSecurityException("Failed to exchange code for Id Token using the Token Endpoint.", e));
+        }
+    }
+
+    /**
+     * Handle the Token Response from the OpenID Connect Provider. If successful, extract the (yet not validated) Id Token
+     * and access token and call the provided listener.
+     */
+    private void handleTokenResponse(HttpResponse httpResponse, ActionListener<Tuple<AccessToken, JWT>> tokensListener) {
+        try {
+            final HttpEntity entity = httpResponse.getEntity();
+            final Header encodingHeader = entity.getContentEncoding();
+            final Header contentHeader = entity.getContentType();
+            if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/json") == false) {
+                tokensListener.onFailure(new IllegalStateException("Unable to parse Token Response. Content type was expected to be " +
+                    "[application/json] but was [" + contentHeader.getValue() + "]"));
+                return;
+            }
+            final Charset encoding = encodingHeader == null ? StandardCharsets.UTF_8 : Charsets.toCharset(encodingHeader.getValue());
+            final String json = EntityUtils.toString(entity, encoding);
+            if (LOGGER.isTraceEnabled()) {
+                LOGGER.trace("Received Token Response from OP with status [{}] and content [{}] ",
+                    httpResponse.getStatusLine().getStatusCode(), json);
+            }
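+            // Parse the body as an OIDC Token Response; an error response carries an OAuth 2.0 error object instead of tokens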
+            final OIDCTokenResponse oidcTokenResponse = OIDCTokenResponse.parse(JSONObjectUtils.parse(json));
+            if (oidcTokenResponse.indicatesSuccess() == false) {
+                TokenErrorResponse errorResponse = oidcTokenResponse.toErrorResponse();
+                tokensListener.onFailure(
+                    new ElasticsearchSecurityException("Failed to exchange code for Id Token. Code=[{}], Description=[{}]",
+                        errorResponse.getErrorObject().getCode(), errorResponse.getErrorObject().getDescription()));
+            } else {
+                OIDCTokenResponse successResponse = oidcTokenResponse.toSuccessResponse();
+                final OIDCTokens oidcTokens = successResponse.getOIDCTokens();
+                final AccessToken accessToken = oidcTokens.getAccessToken();
+                final JWT idToken = oidcTokens.getIDToken();
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace("Successfully exchanged code for ID Token: [{}] and Access Token [{}]",
+                        idToken, accessToken);
+                }
+                if (idToken == null) {
+                    tokensListener.onFailure(new ElasticsearchSecurityException("Token Response did not contain an ID Token or parsing of" +
+                        " the JWT failed."));
+                    return;
+                }
+                tokensListener.onResponse(new Tuple<>(accessToken, idToken));
+            }
+        } catch (IOException | com.nimbusds.oauth2.sdk.ParseException e) {
+            tokensListener.onFailure(
+                new ElasticsearchSecurityException("Failed to exchange code for Id Token using the Token Endpoint. " +
+                    "Unable to parse Token Response", e));
+        }
+    }
+
+    /**
+     * Creates a {@link CloseableHttpAsyncClient} that uses a {@link PoolingNHttpClientConnectionManager}
+     */
+    private CloseableHttpAsyncClient createHttpClient() {
+        try {
+            SpecialPermission.check();
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<CloseableHttpAsyncClient>) () -> {
+                    ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor();
+                    final String sslKey = RealmSettings.realmSslPrefix(realmConfig.identifier());
+                    final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration(sslKey);
+                    final SSLContext clientContext = sslService.sslContext(sslConfiguration);
+                    boolean isHostnameVerificationEnabled = sslConfiguration.verificationMode().isHostnameVerificationEnabled();
+                    final HostnameVerifier verifier = isHostnameVerificationEnabled ?
+                        new DefaultHostnameVerifier() : NoopHostnameVerifier.INSTANCE;
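+                    // Map URI schemes to IO session strategies: plain http needs no TLS, https uses the realm's SSL
+                    // configuration and the hostname verifier selected above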
+                    Registry<SchemeIOSessionStrategy> registry = RegistryBuilder.<SchemeIOSessionStrategy>create()
+                        .register("http", NoopIOSessionStrategy.INSTANCE)
+                        .register("https", new SSLIOSessionStrategy(clientContext, verifier))
+                        .build();
+                    PoolingNHttpClientConnectionManager connectionManager = new PoolingNHttpClientConnectionManager(ioReactor, registry);
+                    connectionManager.setDefaultMaxPerRoute(realmConfig.getSetting(HTTP_MAX_ENDPOINT_CONNECTIONS));
+                    connectionManager.setMaxTotal(realmConfig.getSetting(HTTP_MAX_CONNECTIONS));
+                    final RequestConfig requestConfig = RequestConfig.custom()
+                        .setConnectTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECT_TIMEOUT).getMillis()))
+                        .setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getMillis()))
+                        .setSocketTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_SOCKET_TIMEOUT).getMillis())).build();
+                    CloseableHttpAsyncClient httpAsyncClient = HttpAsyncClients.custom()
+                        .setConnectionManager(connectionManager)
+                        .setDefaultRequestConfig(requestConfig)
+                        .build();
+                    httpAsyncClient.start();
+                    return httpAsyncClient;
+                });
+        } catch (PrivilegedActionException e) {
+            throw new IllegalStateException("Unable to create a HttpAsyncClient instance", e);
+        }
+    }
+
+    /**
+     * Creates an {@link IDTokenValidator} based on the current Relying Party configuration
+     */
+    IDTokenValidator createIdTokenValidator() {
+        try {
+            final JWSAlgorithm requestedAlgorithm = rpConfig.getSignatureAlgorithm();
+            final int allowedClockSkew = Math.toIntExact(realmConfig.getSetting(ALLOWED_CLOCK_SKEW).getMillis());
+            final IDTokenValidator idTokenValidator;
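+            // HMAC-family algorithms use the client secret as the shared signing key; asymmetric algorithms resolve
+            // the signing keys from the configured JWK Set, either a remote HTTPS URL or a local file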
+            if (JWSAlgorithm.Family.HMAC_SHA.contains(requestedAlgorithm)) {
+                final Secret clientSecret = new Secret(rpConfig.getClientSecret().toString());
+                idTokenValidator =
+                    new IDTokenValidator(opConfig.getIssuer(), rpConfig.getClientId(), requestedAlgorithm, clientSecret);
+            } else {
+                String jwkSetPath = opConfig.getJwkSetPath();
+                if (jwkSetPath.startsWith("https://")) {
+                    final JWSVerificationKeySelector keySelector = new JWSVerificationKeySelector(requestedAlgorithm,
+                        new ReloadableJWKSource(new URL(jwkSetPath)));
+                    idTokenValidator = new IDTokenValidator(opConfig.getIssuer(), rpConfig.getClientId(), keySelector, null);
+                } else {
+                    setMetadataFileWatcher(jwkSetPath);
+                    final JWKSet jwkSet = readJwkSetFromFile(jwkSetPath);
+                    idTokenValidator = new IDTokenValidator(opConfig.getIssuer(), rpConfig.getClientId(), requestedAlgorithm, jwkSet);
+                }
+            }
+            idTokenValidator.setMaxClockSkew(allowedClockSkew);
+            return idTokenValidator;
+        } catch (IOException | ParseException e) {
+            throw new IllegalStateException("Unable to create a IDTokenValidator instance", e);
+        }
+    }
+
+    private void setMetadataFileWatcher(String jwkSetPath) throws IOException {
+        final Path path = realmConfig.env().configFile().resolve(jwkSetPath);
+        FileWatcher watcher = new FileWatcher(path);
+        watcher.addListener(new FileListener(LOGGER, () -> this.idTokenValidator.set(createIdTokenValidator())));
+        watcherService.add(watcher, ResourceWatcherService.Frequency.MEDIUM);
+    }
+
+    protected void close() {
+        try {
+            this.httpClient.close();
+        } catch (IOException e) {
+            LOGGER.debug("Unable to close the HttpAsyncClient", e);
+        }
+    }
+
+    private static class FileListener implements FileChangesListener {
+
+        private final Logger logger;
+        private final CheckedRunnable<Exception> onChange;
+
+        private FileListener(Logger logger, CheckedRunnable<Exception> onChange) {
+            this.logger = logger;
+            this.onChange = onChange;
+        }
+
+        @Override
+        public void onFileCreated(Path file) {
+            onFileChanged(file);
+        }
+
+        @Override
+        public void onFileDeleted(Path file) {
+            onFileChanged(file);
+        }
+
+        @Override
+        public void onFileChanged(Path file) {
+            try {
+                onChange.run();
+            } catch (Exception e) {
+                logger.warn(new ParameterizedMessage("An error occurred while reloading file {}", file), e);
+            }
+        }
+    }
+
+    /**
+     * Remote JSON Web Key source specified by a JWKSet URL. The retrieved JWK set is cached to
+     * avoid unnecessary http requests. A single attempt to update the cached set is made
+     * (with {@link ReloadableJWKSource#triggerReload}) when the {@link IDTokenValidator} fails
+     * to validate an ID Token (because of an unknown key) as this might mean that the OpenID
+     * Connect Provider has rotated the signing keys.
+     */
+    class ReloadableJWKSource<C extends SecurityContext> implements JWKSource<C> {
+
+        private volatile JWKSet cachedJwkSet = new JWKSet();
+        private final AtomicReference<ListenableFuture<Void>> reloadFutureRef = new AtomicReference<>();
+        private final URL jwkSetPath;
+
+        private ReloadableJWKSource(URL jwkSetPath) {
+            this.jwkSetPath = jwkSetPath;
+            triggerReload(ActionListener.wrap(success -> LOGGER.trace("Successfully loaded and cached remote JWKSet on startup"),
+                failure -> LOGGER.trace("Failed to load and cache remote JWKSet on startup", failure)));
+        }
+
+        @Override
+        public List<JWK> get(JWKSelector jwkSelector, C context) {
+            return jwkSelector.select(cachedJwkSet);
+        }
+
+        void triggerReload(ActionListener<Void> toNotify) {
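+            // Only one reload may be in flight at a time: threads race to install a future via compareAndSet and the
+            // losers register as listeners on whichever future is already running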
+            ListenableFuture<Void> future = reloadFutureRef.get();
+            while (future == null) {
+                future = new ListenableFuture<>();
+                if (reloadFutureRef.compareAndSet(null, future)) {
+                    reloadAsync(future);
+                } else {
+                    future = reloadFutureRef.get();
+                }
+            }
+            future.addListener(toNotify, EsExecutors.newDirectExecutorService(), null);
+        }
+
+        void reloadAsync(final ListenableFuture<Void> future) {
+            try {
+                final HttpGet httpGet = new HttpGet(jwkSetPath.toURI());
+                AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+                    httpClient.execute(httpGet, new FutureCallback<HttpResponse>() {
+                        @Override
+                        public void completed(HttpResponse result) {
+                            try {
+                                cachedJwkSet = JWKSet.parse(IOUtils.readInputStreamToString(result.getEntity().getContent(),
+                                    StandardCharsets.UTF_8));
+                                reloadFutureRef.set(null);
+                                LOGGER.trace("Successfully refreshed and cached remote JWKSet");
+                            } catch (IOException | ParseException e) {
+                                failed(e);
+                            }
+                        }
+
+                        @Override
+                        public void failed(Exception ex) {
+                            future.onFailure(new ElasticsearchSecurityException("Failed to retrieve remote JWK set.", ex));
+                            reloadFutureRef.set(null);
+                        }
+
+                        @Override
+                        public void cancelled() {
+                            future.onFailure(
+                                new ElasticsearchSecurityException("Failed to retrieve remote JWK set. Request was cancelled."));
+                            reloadFutureRef.set(null);
+                        }
+                    });
+                    return null;
+                });
+            } catch (URISyntaxException e) {
+                future.onFailure(e);
+                reloadFutureRef.set(null);
+            }
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java
new file mode 100644
index 00000000000..272ab283c75
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.oauth2.sdk.id.Issuer;
+import org.elasticsearch.common.Nullable;
+
+import java.net.URI;
+import java.util.Objects;
+
+/**
+ * A class that contains all the OpenID Connect Provider configuration
+ */
+public class OpenIdConnectProviderConfiguration {
+    private final String providerName;
+    private final URI authorizationEndpoint;
+    private final URI tokenEndpoint;
+    private final URI userinfoEndpoint;
+    private final URI endsessionEndpoint;
+    private final Issuer issuer;
+    private final String jwkSetPath;
+
+    public OpenIdConnectProviderConfiguration(String providerName, Issuer issuer, String jwkSetPath, URI authorizationEndpoint,
+                                              URI tokenEndpoint, @Nullable URI userinfoEndpoint, @Nullable URI endsessionEndpoint) {
+        this.providerName = Objects.requireNonNull(providerName, "OP Name must be provided");
+        this.authorizationEndpoint = Objects.requireNonNull(authorizationEndpoint, "Authorization Endpoint must be provided");
+        this.tokenEndpoint = Objects.requireNonNull(tokenEndpoint, "Token Endpoint must be provided");
+        this.userinfoEndpoint = userinfoEndpoint;
+        this.endsessionEndpoint = endsessionEndpoint;
+        this.issuer = Objects.requireNonNull(issuer, "OP Issuer must be provided");
+        this.jwkSetPath = Objects.requireNonNull(jwkSetPath, "jwkSetUrl must be provided");
+    }
+
+    public String getProviderName() {
+        return providerName;
+    }
+
+    public URI getAuthorizationEndpoint() {
+        return authorizationEndpoint;
+    }
+
+    public URI getTokenEndpoint() {
+        return tokenEndpoint;
+    }
+
+    public URI getUserinfoEndpoint() {
+        return userinfoEndpoint;
+    }
+
+    public URI getEndsessionEndpoint() {
+        return endsessionEndpoint;
+    }
+
+    public Issuer getIssuer() {
+        return issuer;
+    }
+
+    public String getJwkSetPath() {
+        return jwkSetPath;
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java
new file mode 100644
index 00000000000..72b04951a91
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java
@@ -0,0 +1,473 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.jose.JWSAlgorithm;
+import com.nimbusds.jwt.JWT;
+import com.nimbusds.jwt.JWTClaimsSet;
+
+import com.nimbusds.oauth2.sdk.ParseException;
+import com.nimbusds.oauth2.sdk.ResponseType;
+import com.nimbusds.oauth2.sdk.Scope;
+import com.nimbusds.oauth2.sdk.id.ClientID;
+import com.nimbusds.oauth2.sdk.id.Issuer;
+import com.nimbusds.oauth2.sdk.id.State;
+import com.nimbusds.openid.connect.sdk.AuthenticationRequest;
+import com.nimbusds.openid.connect.sdk.LogoutRequest;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchSecurityException;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.watcher.ResourceWatcherService;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutResponse;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationResponse;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
+import org.elasticsearch.xpack.core.security.authc.Realm;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmSettings;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.core.ssl.SSLService;
+import org.elasticsearch.xpack.security.authc.TokenService;
+import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport;
+import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.DN_CLAIM;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.GROUPS_CLAIM;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.MAIL_CLAIM;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.NAME_CLAIM;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_ENDSESSION_ENDPOINT;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_ISSUER;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_JWKSET_PATH;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_NAME;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_USERINFO_ENDPOINT;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.POPULATE_USER_METADATA;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.PRINCIPAL_CLAIM;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_CLIENT_ID;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_CLIENT_SECRET;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_POST_LOGOUT_REDIRECT_URI;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_REDIRECT_URI;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_RESPONSE_TYPE;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES;
+import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.RP_SIGNATURE_ALGORITHM;
+
+public class OpenIdConnectRealm extends Realm implements Releasable {
+
+    public static final String CONTEXT_TOKEN_DATA = "_oidc_tokendata";
+    private final OpenIdConnectProviderConfiguration opConfiguration;
+    private final RelyingPartyConfiguration rpConfiguration;
+    private final OpenIdConnectAuthenticator openIdConnectAuthenticator;
+    private final ClaimParser principalAttribute;
+    private final ClaimParser groupsAttribute;
+    private final ClaimParser dnAttribute;
+    private final ClaimParser nameAttribute;
+    private final ClaimParser mailAttribute;
+    private final Boolean populateUserMetadata;
+    private final UserRoleMapper roleMapper;
+
+    private DelegatedAuthorizationSupport delegatedRealms;
+
+    public OpenIdConnectRealm(RealmConfig config, SSLService sslService, UserRoleMapper roleMapper,
+                              ResourceWatcherService watcherService) {
+        super(config);
+        this.roleMapper = roleMapper;
+        this.rpConfiguration = buildRelyingPartyConfiguration(config);
+        this.opConfiguration = buildOpenIdConnectProviderConfiguration(config);
+        this.principalAttribute = ClaimParser.forSetting(logger, PRINCIPAL_CLAIM, config, true);
+        this.groupsAttribute = ClaimParser.forSetting(logger, GROUPS_CLAIM, config, false);
+        this.dnAttribute = ClaimParser.forSetting(logger, DN_CLAIM, config, false);
+        this.nameAttribute = ClaimParser.forSetting(logger, NAME_CLAIM, config, false);
+        this.mailAttribute = ClaimParser.forSetting(logger, MAIL_CLAIM, config, false);
+        this.populateUserMetadata = config.getSetting(POPULATE_USER_METADATA);
+        if (TokenService.isTokenServiceEnabled(config.settings()) == false) {
+            throw new IllegalStateException("OpenID Connect Realm requires that the token service be enabled ("
+                + XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey() + ")");
+        }
+        this.openIdConnectAuthenticator =
+            new OpenIdConnectAuthenticator(config, opConfiguration, rpConfiguration, sslService, watcherService);
+    }
+
+    // For testing
+    OpenIdConnectRealm(RealmConfig config, OpenIdConnectAuthenticator authenticator, UserRoleMapper roleMapper) {
+        super(config);
+        this.roleMapper = roleMapper;
+        this.rpConfiguration = buildRelyingPartyConfiguration(config);
+        this.opConfiguration = buildOpenIdConnectProviderConfiguration(config);
+        this.openIdConnectAuthenticator = authenticator;
+        this.principalAttribute = ClaimParser.forSetting(logger, PRINCIPAL_CLAIM, config, true);
+        this.groupsAttribute = ClaimParser.forSetting(logger, GROUPS_CLAIM, config, false);
+        this.dnAttribute = ClaimParser.forSetting(logger, DN_CLAIM, config, false);
+        this.nameAttribute = ClaimParser.forSetting(logger, NAME_CLAIM, config, false);
+        this.mailAttribute = ClaimParser.forSetting(logger, MAIL_CLAIM, config, false);
+        this.populateUserMetadata = config.getSetting(POPULATE_USER_METADATA);
+    }
+
+    @Override
+    public void initialize(Iterable<Realm> realms, XPackLicenseState licenseState) {
+        if (delegatedRealms != null) {
+            throw new IllegalStateException("Realm has already been initialized");
+        }
+        delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState);
+    }
+
+    @Override
+    public boolean supports(AuthenticationToken token) {
+        return token instanceof OpenIdConnectToken;
+    }
+
+    @Override
+    public AuthenticationToken token(ThreadContext context) {
+        return null;
+    }
+
+    @Override
+    public void authenticate(AuthenticationToken token, ActionListener<AuthenticationResult> listener) {
+        if (token instanceof OpenIdConnectToken) {
+            OpenIdConnectToken oidcToken = (OpenIdConnectToken) token;
+            openIdConnectAuthenticator.authenticate(oidcToken, ActionListener.wrap(
+                jwtClaimsSet -> {
+                    buildUserFromClaims(jwtClaimsSet, listener);
+                },
+                e -> {
+                    logger.debug("Failed to consume the OpenIdConnectToken ", e);
+                    if (e instanceof ElasticsearchSecurityException) {
+                        listener.onResponse(AuthenticationResult.unsuccessful("Failed to authenticate user with OpenID Connect", e));
+                    } else {
+                        listener.onFailure(e);
+                    }
+                }));
+        } else {
+            listener.onResponse(AuthenticationResult.notHandled());
+        }
+    }
+
+    @Override
+    public void lookupUser(String username, ActionListener<User> listener) {
+        listener.onResponse(null);
+    }
+
+
+    private void buildUserFromClaims(JWTClaimsSet claims, ActionListener<AuthenticationResult> authResultListener) {
+        final String principal = principalAttribute.getClaimValue(claims);
+        if (Strings.isNullOrEmpty(principal)) {
+            authResultListener.onResponse(AuthenticationResult.unsuccessful(
+                principalAttribute + "not found in " + claims.toJSONObject(), null));
+            return;
+        }
+
+        final Map<String, Object> tokenMetadata = new HashMap<>();
+        tokenMetadata.put("id_token_hint", claims.getClaim("id_token_hint"));
+        ActionListener<AuthenticationResult> wrappedAuthResultListener = ActionListener.wrap(auth -> {
+            if (auth.isAuthenticated()) {
+                // Add the ID Token as metadata on the authentication, so that it can be used for logout requests
+                Map<String, Object> metadata = new HashMap<>(auth.getMetadata());
+                metadata.put(CONTEXT_TOKEN_DATA, tokenMetadata);
+                auth = AuthenticationResult.success(auth.getUser(), metadata);
+            }
+            authResultListener.onResponse(auth);
+        }, authResultListener::onFailure);
+
+        if (delegatedRealms.hasDelegation()) {
+            delegatedRealms.resolve(principal, wrappedAuthResultListener);
+            return;
+        }
+
+        final Map<String, Object> userMetadata = new HashMap<>();
+        if (populateUserMetadata) {
+            Map<String, Object> claimsMap = claims.getClaims();
+            /*
+             * We whitelist the Types that we want to parse as metadata from the Claims, explicitly filtering out {@link Date}s
+             */
+            Set<Map.Entry> allowedEntries = claimsMap.entrySet().stream().filter(entry -> {
+                Object v = entry.getValue();
+                return (v instanceof String || v instanceof Boolean || v instanceof Number || v instanceof Collection);
+            }).collect(Collectors.toSet());
+            for (Map.Entry entry : allowedEntries) {
+                userMetadata.put("oidc(" + entry.getKey() + ")", entry.getValue());
+            }
+        }
+        final List<String> groups = groupsAttribute.getClaimValues(claims);
+        final String dn = dnAttribute.getClaimValue(claims);
+        final String mail = mailAttribute.getClaimValue(claims);
+        final String name = nameAttribute.getClaimValue(claims);
+        UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, dn, groups, userMetadata, config);
+        roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> {
+            final User user = new User(principal, roles.toArray(Strings.EMPTY_ARRAY), name, mail, userMetadata, true);
+            wrappedAuthResultListener.onResponse(AuthenticationResult.success(user));
+        }, wrappedAuthResultListener::onFailure));
+
+    }
+
+    private RelyingPartyConfiguration buildRelyingPartyConfiguration(RealmConfig config) {
+        final String redirectUriString = require(config, RP_REDIRECT_URI);
+        final URI redirectUri;
+        try {
+            redirectUri = new URI(redirectUriString);
+        } catch (URISyntaxException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid URI:" + RP_REDIRECT_URI.getKey(), e);
+        }
+        final String postLogoutRedirectUriString = config.getSetting(RP_POST_LOGOUT_REDIRECT_URI);
+        final URI postLogoutRedirectUri;
+        try {
+            postLogoutRedirectUri = new URI(postLogoutRedirectUriString);
+        } catch (URISyntaxException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid URI:" + RP_POST_LOGOUT_REDIRECT_URI.getKey(), e);
+        }
+        final ClientID clientId = new ClientID(require(config, RP_CLIENT_ID));
+        final SecureString clientSecret = config.getSetting(RP_CLIENT_SECRET);
+        final ResponseType responseType;
+        try {
+            responseType = ResponseType.parse(require(config, RP_RESPONSE_TYPE));
+        } catch (ParseException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid value for " + RP_RESPONSE_TYPE.getKey(), e);
+        }
+
+        final Scope requestedScope = new Scope(config.getSetting(RP_REQUESTED_SCOPES).toArray(Strings.EMPTY_ARRAY));
+        if (requestedScope.contains("openid") == false) {
+            requestedScope.add("openid");
+        }
+        final JWSAlgorithm signatureAlgorithm = JWSAlgorithm.parse(require(config, RP_SIGNATURE_ALGORITHM));
+
+        return new RelyingPartyConfiguration(clientId, clientSecret, redirectUri, responseType, requestedScope,
+            signatureAlgorithm, postLogoutRedirectUri);
+    }
+
+    private OpenIdConnectProviderConfiguration buildOpenIdConnectProviderConfiguration(RealmConfig config) {
+        String providerName = require(config, OP_NAME);
+        Issuer issuer = new Issuer(require(config, OP_ISSUER));
+
+        String jwkSetUrl = require(config, OP_JWKSET_PATH);
+
+        URI authorizationEndpoint;
+        try {
+            authorizationEndpoint = new URI(require(config, OP_AUTHORIZATION_ENDPOINT));
+        } catch (URISyntaxException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid URI: " + OP_AUTHORIZATION_ENDPOINT.getKey(), e);
+        }
+        URI tokenEndpoint;
+        try {
+            tokenEndpoint = new URI(require(config, OP_TOKEN_ENDPOINT));
+        } catch (URISyntaxException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid URL: " + OP_TOKEN_ENDPOINT.getKey(), e);
+        }
+        URI userinfoEndpoint;
+        try {
+            userinfoEndpoint = (config.getSetting(OP_USERINFO_ENDPOINT, () -> null) == null) ? null :
+                new URI(config.getSetting(OP_USERINFO_ENDPOINT, () -> null));
+        } catch (URISyntaxException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid URI: " + OP_USERINFO_ENDPOINT.getKey(), e);
+        }
+        URI endsessionEndpoint;
+        try {
+            endsessionEndpoint = (config.getSetting(OP_ENDSESSION_ENDPOINT, () -> null) == null) ? null :
+                new URI(config.getSetting(OP_ENDSESSION_ENDPOINT, () -> null));
+        } catch (URISyntaxException e) {
+            // This should never happen as it's already validated in the settings
+            throw new SettingsException("Invalid URI: " + OP_ENDSESSION_ENDPOINT.getKey(), e);
+        }
+
+        return new OpenIdConnectProviderConfiguration(providerName, issuer, jwkSetUrl, authorizationEndpoint, tokenEndpoint,
+            userinfoEndpoint, endsessionEndpoint);
+    }
+
+    private static String require(RealmConfig config, Setting.AffixSetting<String> setting) {
+        final String value = config.getSetting(setting);
+        if (value.isEmpty()) {
+            throw new SettingsException("The configuration setting [" + RealmSettings.getFullSettingKey(config, setting)
+                + "] is required");
+        }
+        return value;
+    }
+
+    /**
+     * Creates the URI for an OIDC Authentication Request from the realm configuration using URI Query String Serialization and
+     * possibly generates a state parameter and a nonce. It then returns the URI, state and nonce encapsulated in a
+     * {@link OpenIdConnectPrepareAuthenticationResponse}. A facilitator can provide a state and a nonce parameter in two cases:
+     * <ul>
+     *     <li>In case of Kibana, it allows for a better UX by ensuring that all requests to an OpenID Connect Provider within
+     *     the same browser context (even across tabs) will use the same state and nonce values.</li>
+     *     <li>In case of custom facilitators, the implementer might require/support generating the state parameter in order
+     *     to tie this to an anti-XSRF token.</li>
+     * </ul>
+     *
+     * @param existingState An existing state that can be reused or null if we need to generate one
+     * @param existingNonce An existing nonce that can be reused or null if we need to generate one
+     * @param loginHint A String with a login hint to add to the authentication request in case of a 3rd party initiated login
+     *
+     * @return an {@link OpenIdConnectPrepareAuthenticationResponse}
+     */
+    public OpenIdConnectPrepareAuthenticationResponse buildAuthenticationRequestUri(@Nullable String existingState,
+                                                                                    @Nullable String existingNonce,
+                                                                                    @Nullable String loginHint) {
+        final State state = existingState != null ? new State(existingState) : new State();
+        final Nonce nonce = existingNonce != null ? new Nonce(existingNonce) : new Nonce();
+        final AuthenticationRequest.Builder builder = new AuthenticationRequest.Builder(rpConfiguration.getResponseType(),
+            rpConfiguration.getRequestedScope(),
+            rpConfiguration.getClientId(),
+            rpConfiguration.getRedirectUri())
+            .endpointURI(opConfiguration.getAuthorizationEndpoint())
+            .state(state)
+            .nonce(nonce);
+        if (Strings.hasText(loginHint)) {
+            builder.loginHint(loginHint);
+        }
+        return new OpenIdConnectPrepareAuthenticationResponse(builder.build().toURI().toString(),
+            state.getValue(), nonce.getValue());
+    }
+
+    public boolean isIssuerValid(String issuer) {
+        return this.opConfiguration.getIssuer().getValue().equals(issuer);
+    }
+
+    public OpenIdConnectLogoutResponse buildLogoutResponse(JWT idTokenHint) {
+        if (opConfiguration.getEndsessionEndpoint() != null) {
+            final State state = new State();
+            final LogoutRequest logoutRequest = new LogoutRequest(opConfiguration.getEndsessionEndpoint(), idTokenHint,
+                rpConfiguration.getPostLogoutRedirectUri(), state);
+            return new OpenIdConnectLogoutResponse(logoutRequest.toURI().toString());
+        } else {
+            return new OpenIdConnectLogoutResponse((String) null);
+        }
+    }
+
+    @Override
+    public void close() {
+        openIdConnectAuthenticator.close();
+    }
+
+    static final class ClaimParser {
+        private final String name;
+        private final Function<JWTClaimsSet, List<String>> parser;
+
+        ClaimParser(String name, Function<JWTClaimsSet, List<String>> parser) {
+            this.name = name;
+            this.parser = parser;
+        }
+
+        List<String> getClaimValues(JWTClaimsSet claims) {
+            return parser.apply(claims);
+        }
+
+        String getClaimValue(JWTClaimsSet claims) {
+            List<String> claimValues = parser.apply(claims);
+            if (claimValues == null || claimValues.isEmpty()) {
+                return null;
+            } else {
+                return claimValues.get(0);
+            }
+        }
+
+        @Override
+        public String toString() {
+            return name;
+        }
+
+        static ClaimParser forSetting(Logger logger, OpenIdConnectRealmSettings.ClaimSetting setting, RealmConfig realmConfig,
+                                      boolean required) {
+
+            if (realmConfig.hasSetting(setting.getClaim())) {
+                String claimName = realmConfig.getSetting(setting.getClaim());
+                if (realmConfig.hasSetting(setting.getPattern())) {
+                    Pattern regex = Pattern.compile(realmConfig.getSetting(setting.getPattern()));
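+                    // group(1) of the first regex match becomes the claim value; values that do not match, or that
+                    // match with an empty first group, are dropped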
+                    return new ClaimParser(
+                        "OpenID Connect Claim [" + claimName + "] with pattern [" + regex.pattern() + "] for ["
+                            + setting.name(realmConfig) + "]",
+                        claims -> {
+                            Object claimValueObject = claims.getClaim(claimName);
+                            List<String> values;
+                            if (claimValueObject == null) {
+                                values = Collections.emptyList();
+                            } else if (claimValueObject instanceof String) {
+                                values = Collections.singletonList((String) claimValueObject);
+                            } else if (claimValueObject instanceof List) {
+                                values = (List<String>) claimValueObject;
+                            } else {
+                                throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
+                                    + " expects a claim with String or a String Array value but found a "
+                                    + claimValueObject.getClass().getName());
+                            }
+                            return values.stream().map(s -> {
+                                final Matcher matcher = regex.matcher(s);
+                                if (matcher.find() == false) {
+                                    logger.debug("OpenID Connect Claim [{}] is [{}], which does not match [{}]",
+                                        claimName, s, regex.pattern());
+                                    return null;
+                                }
+                                final String value = matcher.group(1);
+                                if (Strings.isNullOrEmpty(value)) {
+                                    logger.debug("OpenID Connect Claim [{}] is [{}], which does match [{}] but group(1) is empty",
+                                        claimName, s, regex.pattern());
+                                    return null;
+                                }
+                                return value;
+                            }).filter(Objects::nonNull).collect(Collectors.toList());
+                        });
+                } else {
+                    return new ClaimParser(
+                        "OpenID Connect Claim [" + claimName + "] for [" + setting.name(realmConfig) + "]",
+                        claims -> {
+                            Object claimValueObject = claims.getClaim(claimName);
+                            if (claimValueObject == null) {
+                                return Collections.emptyList();
+                            } else if (claimValueObject instanceof String) {
+                                return Collections.singletonList((String) claimValueObject);
+                            } else if (claimValueObject instanceof List == false) {
+                                throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
+                                    + " expects a claim with String or a String Array value but found a "
+                                    + claimValueObject.getClass().getName());
+                            }
+                            return (List<String>) claimValueObject;
+                        });
+                }
+            } else if (required) {
+                throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
+                    + "] is required");
+            } else if (realmConfig.hasSetting(setting.getPattern())) {
+                throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern())
+                    + "] cannot be set unless [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
+                    + "] is also set");
+            } else {
+                return new ClaimParser("No OpenID Connect Claim for [" + setting.name(realmConfig) + "]",
+                    attributes -> Collections.emptyList());
+            }
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectToken.java
new file mode 100644
index 00000000000..ab61fd8fb9d
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectToken.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.oauth2.sdk.id.State;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
+
+/**
+ * An {@link AuthenticationToken} to hold OpenID Connect related content.
+ * Depending on the flow, the token contains either only an authorization code (OAuth2 authorization code grant flow)
+ * or the Identity Token itself (OAuth2 implicit flow).
+ */
+public class OpenIdConnectToken implements AuthenticationToken {
+
+    private String redirectUrl;
+    private State state;
+    private Nonce nonce;
+
+    /**
+     * @param redirectUrl The URI where the OP redirected the browser after the authentication event at the OP. This is passed as is from
+     *                    the facilitator entity (i.e. Kibana), so it is URL Encoded. It contains either the code or the id_token itself
+     *                    depending on the flow used
+     * @param state       The state value that we generated or the facilitator provided for this specific flow and should be stored at the
+     *                    user's session with the facilitator.
+     * @param nonce       The nonce value that we generated or the facilitator provided for this specific flow and should be stored at the
+     *                    user's session with the facilitator.
+     */
+    public OpenIdConnectToken(String redirectUrl, State state, Nonce nonce) {
+        this.redirectUrl = redirectUrl;
+        this.state = state;
+        this.nonce = nonce;
+    }
+
+    @Override
+    public String principal() {
+        return "<OIDC Token>";
+    }
+
+    @Override
+    public Object credentials() {
+        return redirectUrl;
+    }
+
+    @Override
+    public void clearCredentials() {
+        this.redirectUrl = null;
+    }
+
+    public State getState() {
+        return state;
+    }
+
+    public Nonce getNonce() {
+        return nonce;
+    }
+
+    public String getRedirectUrl() {
+        return redirectUrl;
+    }
+
+    @Override
+    public String toString() {
+        return getClass().getSimpleName() + "{ redirectUrl=" + redirectUrl + ", state=" + state + ", nonce=" + nonce + "}";
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/RelyingPartyConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/RelyingPartyConfiguration.java
new file mode 100644
index 00000000000..ed67974c0b0
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/RelyingPartyConfiguration.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.jose.JWSAlgorithm;
+import com.nimbusds.oauth2.sdk.ResponseType;
+import com.nimbusds.oauth2.sdk.Scope;
+import com.nimbusds.oauth2.sdk.id.ClientID;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.SecureString;
+
+import java.net.URI;
+import java.util.Objects;
+
+/**
+ * A class that contains all the OpenID Connect Relying Party configuration
+ */
+public class RelyingPartyConfiguration {
+    private final ClientID clientId;
+    private final SecureString clientSecret;
+    private final URI redirectUri;
+    private final ResponseType responseType;
+    private final Scope requestedScope;
+    private final JWSAlgorithm signatureAlgorithm;
+    private final URI postLogoutRedirectUri;
+
+    public RelyingPartyConfiguration(ClientID clientId, SecureString clientSecret, URI redirectUri, ResponseType responseType,
+                                     Scope requestedScope, JWSAlgorithm algorithm, @Nullable URI postLogoutRedirectUri) {
+        this.clientId = Objects.requireNonNull(clientId, "clientId must be provided");
+        this.clientSecret = Objects.requireNonNull(clientSecret, "clientSecret must be provided");
+        this.redirectUri = Objects.requireNonNull(redirectUri, "redirectUri must be provided");
+        this.responseType = Objects.requireNonNull(responseType, "responseType must be provided");
+        this.requestedScope = Objects.requireNonNull(requestedScope, "responseType must be provided");
+        this.signatureAlgorithm = Objects.requireNonNull(algorithm, "algorithm must be provided");
+        this.postLogoutRedirectUri = postLogoutRedirectUri;
+    }
+
+    public ClientID getClientId() {
+        return clientId;
+    }
+
+    public SecureString getClientSecret() {
+        return clientSecret;
+    }
+
+    public URI getRedirectUri() {
+        return redirectUri;
+    }
+
+    public ResponseType getResponseType() {
+        return responseType;
+    }
+
+    public Scope getRequestedScope() {
+        return requestedScope;
+    }
+
+    public JWSAlgorithm getSignatureAlgorithm() {
+        return signatureAlgorithm;
+    }
+
+    public URI getPostLogoutRedirectUri() {
+        return postLogoutRedirectUri;
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/OpenIdConnectBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/OpenIdConnectBaseRestHandler.java
new file mode 100644
index 00000000000..008b5d0676e
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/OpenIdConnectBaseRestHandler.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.rest.action.oidc;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.license.LicenseUtils;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
+import org.elasticsearch.xpack.security.authc.Realms;
+import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler;
+
+public abstract class OpenIdConnectBaseRestHandler extends SecurityBaseRestHandler {
+
+    private static final String OIDC_REALM_TYPE = OpenIdConnectRealmSettings.TYPE;
+
+    /**
+     * @param settings     the node's settings
+     * @param licenseState the license state that will be used to determine if security is licensed
+     */
+    protected OpenIdConnectBaseRestHandler(Settings settings, XPackLicenseState licenseState) {
+        super(settings, licenseState);
+    }
+
+    @Override
+    protected Exception checkFeatureAvailable(RestRequest request) {
+        Exception failedFeature = super.checkFeatureAvailable(request);
+        if (failedFeature != null) {
+            return failedFeature;
+        } else if (Realms.isRealmTypeAvailable(licenseState.allowedRealmType(), OIDC_REALM_TYPE)) {
+            return null;
+        } else {
+            logger.info("The '{}' realm is not available under the current license", OIDC_REALM_TYPE);
+            return LicenseUtils.newComplianceException(OIDC_REALM_TYPE);
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectAuthenticateAction.java
new file mode 100644
index 00000000000..2ac75872b7c
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectAuthenticateAction.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.rest.action.oidc;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+/**
+ * Rest handler that authenticates the user based on the information provided as parameters of the redirect_uri
+ */
+public class RestOpenIdConnectAuthenticateAction extends OpenIdConnectBaseRestHandler {
+
+    static final ObjectParser<OpenIdConnectAuthenticateRequest, Void> PARSER = new ObjectParser<>("oidc_authn",
+        OpenIdConnectAuthenticateRequest::new);
+
+    static {
+        PARSER.declareString(OpenIdConnectAuthenticateRequest::setRedirectUri, new ParseField("redirect_uri"));
+        PARSER.declareString(OpenIdConnectAuthenticateRequest::setState, new ParseField("state"));
+        PARSER.declareString(OpenIdConnectAuthenticateRequest::setNonce, new ParseField("nonce"));
+    }
+
+    public RestOpenIdConnectAuthenticateAction(Settings settings, RestController controller, XPackLicenseState licenseState) {
+        super(settings, licenseState);
+        controller.registerHandler(POST, "/_security/oidc/authenticate", this);
+    }
+
+    @Override
+    protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException {
+        try (XContentParser parser = request.contentParser()) {
+            final OpenIdConnectAuthenticateRequest authenticateRequest = PARSER.parse(parser, null);
+            logger.trace("OIDC Authenticate: " + authenticateRequest);
+            return channel -> client.execute(OpenIdConnectAuthenticateAction.INSTANCE, authenticateRequest,
+                new RestBuilderListener<OpenIdConnectAuthenticateResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(OpenIdConnectAuthenticateResponse response, XContentBuilder builder)
+                        throws Exception {
+                        builder.startObject()
+                            .field("username", response.getPrincipal())
+                            .field("access_token", response.getAccessTokenString())
+                            .field("refresh_token", response.getRefreshTokenString())
+                            .field("expires_in", response.getExpiresIn().seconds())
+                            .endObject();
+                        return new BytesRestResponse(RestStatus.OK, builder);
+                    }
+                });
+        }
+    }
+
+    @Override
+    public String getName() {
+        return "security_oidc_authenticate_action";
+    }
+}
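
A minimal usage sketch for the endpoint above (not part of the change): the body of POST /_security/oidc/authenticate carries exactly the three fields registered on PARSER, and the equivalent transport-level request can be built with setters that already appear in this patch. All concrete values below are illustrative placeholders.

    // Hypothetical values; redirect_uri, state and nonce must match what the prepare
    // step produced and what the OpenID Connect Provider sent back on the redirect.
    OpenIdConnectAuthenticateRequest authnRequest = new OpenIdConnectAuthenticateRequest();
    authnRequest.setRedirectUri("https://rp.example.org/cb?code=abc&state=xyz"); // "redirect_uri"
    authnRequest.setState("xyz");                                                // "state"
    authnRequest.setNonce("n-0S6");                                              // "nonce"
    // authnRequest.validate() only reports errors for missing parameters (see
    // OpenIdConnectAuthenticateRequestTests below), so a fully populated request is valid.
    // On success the REST response contains username, access_token, refresh_token and expires_in.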
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectLogoutAction.java
new file mode 100644
index 00000000000..e098e14c423
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectLogoutAction.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.rest.action.oidc;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+/**
+ * Rest handler that invalidates a security token for the given OpenID Connect realm and, if the configuration of
+ * the realm supports it, generates a redirect to the `end_session_endpoint` of the OpenID Connect Provider.
+ */
+public class RestOpenIdConnectLogoutAction extends OpenIdConnectBaseRestHandler {
+
+    static final ObjectParser<OpenIdConnectLogoutRequest, Void> PARSER = new ObjectParser<>("oidc_logout",
+        OpenIdConnectLogoutRequest::new);
+
+    static {
+        PARSER.declareString(OpenIdConnectLogoutRequest::setToken, new ParseField("token"));
+        PARSER.declareString(OpenIdConnectLogoutRequest::setRefreshToken, new ParseField("refresh_token"));
+    }
+
+    public RestOpenIdConnectLogoutAction(Settings settings, RestController controller, XPackLicenseState licenseState) {
+        super(settings, licenseState);
+        controller.registerHandler(POST, "/_security/oidc/logout", this);
+    }
+
+    @Override
+    protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException {
+        try (XContentParser parser = request.contentParser()) {
+            final OpenIdConnectLogoutRequest logoutRequest = PARSER.parse(parser, null);
+            return channel -> client.execute(OpenIdConnectLogoutAction.INSTANCE, logoutRequest,
+                new RestBuilderListener<OpenIdConnectLogoutResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(OpenIdConnectLogoutResponse response, XContentBuilder builder) throws Exception {
+                        builder.startObject();
+                        builder.field("redirect", response.getEndSessionUrl());
+                        builder.endObject();
+                        return new BytesRestResponse(RestStatus.OK, builder);
+                    }
+                });
+        }
+    }
+
+    @Override
+    public String getName() {
+        return "security_oidc_logout_action";
+    }
+}
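
A minimal usage sketch for the logout endpoint above (not part of the change): the body of POST /_security/oidc/logout carries the access token to invalidate and, optionally, the associated refresh token; the response holds a single "redirect" field with the end_session_endpoint URL when the realm configures one. The token variables below are placeholders.

    OpenIdConnectLogoutRequest logoutRequest = new OpenIdConnectLogoutRequest();
    logoutRequest.setToken(accessTokenString);          // "token": access token to invalidate
    logoutRequest.setRefreshToken(refreshTokenString);  // "refresh_token": optional
    // The listener then reads response.getEndSessionUrl() and returns it as "redirect".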
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectPrepareAuthenticationAction.java
new file mode 100644
index 00000000000..60786c82b56
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oidc/RestOpenIdConnectPrepareAuthenticationAction.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.rest.action.oidc;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationAction;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationResponse;
+
+import java.io.IOException;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+/**
+ * Generates an OAuth 2.0 authentication request as a URL string and returns it to the REST client.
+ */
+public class RestOpenIdConnectPrepareAuthenticationAction extends OpenIdConnectBaseRestHandler {
+
+    static final ObjectParser<OpenIdConnectPrepareAuthenticationRequest, Void> PARSER = new ObjectParser<>("oidc_prepare_authentication",
+        OpenIdConnectPrepareAuthenticationRequest::new);
+
+    static {
+        PARSER.declareString(OpenIdConnectPrepareAuthenticationRequest::setRealmName, new ParseField("realm"));
+        PARSER.declareString(OpenIdConnectPrepareAuthenticationRequest::setIssuer, new ParseField("iss"));
+        PARSER.declareString(OpenIdConnectPrepareAuthenticationRequest::setLoginHint, new ParseField("login_hint"));
+        PARSER.declareString(OpenIdConnectPrepareAuthenticationRequest::setState, new ParseField("state"));
+        PARSER.declareString(OpenIdConnectPrepareAuthenticationRequest::setNonce, new ParseField("nonce"));
+    }
+
+    public RestOpenIdConnectPrepareAuthenticationAction(Settings settings, RestController controller, XPackLicenseState licenseState) {
+        super(settings, licenseState);
+        controller.registerHandler(POST, "/_security/oidc/prepare", this);
+    }
+
+    @Override
+    protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException {
+        try (XContentParser parser = request.contentParser()) {
+            final OpenIdConnectPrepareAuthenticationRequest prepareAuthenticationRequest = PARSER.parse(parser, null);
+            logger.trace("OIDC Prepare Authentication: " + prepareAuthenticationRequest);
+            return channel -> client.execute(OpenIdConnectPrepareAuthenticationAction.INSTANCE, prepareAuthenticationRequest,
+                new RestBuilderListener<OpenIdConnectPrepareAuthenticationResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(OpenIdConnectPrepareAuthenticationResponse response, XContentBuilder builder)
+                        throws Exception {
+                        logger.trace("OIDC Prepare Authentication Response: " + response);
+                        return new BytesRestResponse(RestStatus.OK, response.toXContent(builder, request));
+                    }
+                });
+        }
+    }
+
+    @Override
+    public String getName() {
+        return "security_oidc_prepare_authentication_action";
+    }
+}
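
A minimal usage sketch for the prepare endpoint above (not part of the change): the body of POST /_security/oidc/prepare must identify the realm either by "realm" or by "iss", but not both (see OpenIdConnectPrepareAuthenticationRequestTests below); login_hint, state and nonce are optional. Values are illustrative placeholders.

    OpenIdConnectPrepareAuthenticationRequest prepareRequest = new OpenIdConnectPrepareAuthenticationRequest();
    prepareRequest.setRealmName("oidc1");  // "realm" - alternatively setIssuer(...), but not both
    prepareRequest.setLoginHint("jane");   // optional "login_hint"
    prepareRequest.setState("xyz");        // optional "state"
    prepareRequest.setNonce("n-0S6");      // optional "nonce"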
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectAuthenticateRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectAuthenticateRequestTests.java
new file mode 100644
index 00000000000..08bc96c40b4
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectAuthenticateRequestTests.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.oidc;
+
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectAuthenticateRequest;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+public class OpenIdConnectAuthenticateRequestTests extends ESTestCase {
+
+    public void testSerialization() throws IOException {
+        final OpenIdConnectAuthenticateRequest request = new OpenIdConnectAuthenticateRequest();
+        final String nonce = randomAlphaOfLengthBetween(8, 12);
+        final String state = randomAlphaOfLengthBetween(8, 12);
+        final String redirectUri = "https://rp.com/cb?code=thisisacode&state=" + state;
+        request.setRedirectUri(redirectUri);
+        request.setState(state);
+        request.setNonce(nonce);
+        final BytesStreamOutput out = new BytesStreamOutput();
+        request.writeTo(out);
+
+        final OpenIdConnectAuthenticateRequest unserialized = new OpenIdConnectAuthenticateRequest(out.bytes().streamInput());
+        assertThat(unserialized.getRedirectUri(), equalTo(redirectUri));
+        assertThat(unserialized.getState(), equalTo(state));
+        assertThat(unserialized.getNonce(), equalTo(nonce));
+    }
+
+    public void testValidation() {
+        final OpenIdConnectAuthenticateRequest request = new OpenIdConnectAuthenticateRequest();
+        final ActionRequestValidationException validation = request.validate();
+        assertNotNull(validation);
+        assertThat(validation.validationErrors().size(), equalTo(3));
+        assertThat(validation.validationErrors().get(0), containsString("state parameter is missing"));
+        assertThat(validation.validationErrors().get(1), containsString("nonce parameter is missing"));
+        assertThat(validation.validationErrors().get(2), containsString("redirect_uri parameter is missing"));
+
+        final OpenIdConnectAuthenticateRequest request2 = new OpenIdConnectAuthenticateRequest();
+        request2.setRedirectUri("https://rp.company.com/cb?code=abc");
+        request2.setState(randomAlphaOfLengthBetween(8, 12));
+        final ActionRequestValidationException validation2 = request2.validate();
+        assertNotNull(validation2);
+        assertThat(validation2.validationErrors().size(), equalTo(1));
+        assertThat(validation2.validationErrors().get(0), containsString("nonce parameter is missing"));
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestTests.java
new file mode 100644
index 00000000000..e668008deb9
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestTests.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.oidc;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationRequest;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+public class OpenIdConnectPrepareAuthenticationRequestTests extends ESTestCase {
+
+    public void testSerialization() throws IOException {
+        final OpenIdConnectPrepareAuthenticationRequest request = new OpenIdConnectPrepareAuthenticationRequest();
+        request.setRealmName("oidc-realm1");
+        final BytesStreamOutput out = new BytesStreamOutput();
+        request.writeTo(out);
+
+        final OpenIdConnectPrepareAuthenticationRequest deserialized =
+            new OpenIdConnectPrepareAuthenticationRequest(out.bytes().streamInput());
+        assertThat(deserialized.getRealmName(), equalTo("oidc-realm1"));
+
+        final OpenIdConnectPrepareAuthenticationRequest request2 = new OpenIdConnectPrepareAuthenticationRequest();
+        request2.setIssuer("https://op.company.org/");
+        final BytesStreamOutput out2 = new BytesStreamOutput();
+        request2.writeTo(out2);
+
+        final OpenIdConnectPrepareAuthenticationRequest deserialized2 =
+            new OpenIdConnectPrepareAuthenticationRequest(out2.bytes().streamInput());
+        assertThat(deserialized2.getIssuer(), equalTo("https://op.company.org/"));
+    }
+
+    public void testSerializationWithStateAndNonce() throws IOException {
+        final OpenIdConnectPrepareAuthenticationRequest request = new OpenIdConnectPrepareAuthenticationRequest();
+        final String nonce = randomAlphaOfLengthBetween(8, 12);
+        final String state = randomAlphaOfLengthBetween(8, 12);
+        request.setRealmName("oidc-realm1");
+        request.setNonce(nonce);
+        request.setState(state);
+        final BytesStreamOutput out = new BytesStreamOutput();
+        request.writeTo(out);
+
+        final OpenIdConnectPrepareAuthenticationRequest deserialized =
+            new OpenIdConnectPrepareAuthenticationRequest(out.bytes().streamInput());
+        assertThat(deserialized.getRealmName(), equalTo("oidc-realm1"));
+        assertThat(deserialized.getState(), equalTo(state));
+        assertThat(deserialized.getNonce(), equalTo(nonce));
+    }
+
+    public void testValidation() {
+        final OpenIdConnectPrepareAuthenticationRequest request = new OpenIdConnectPrepareAuthenticationRequest();
+        final ActionRequestValidationException validation = request.validate();
+        assertNotNull(validation);
+        assertThat(validation.validationErrors().size(), equalTo(1));
+        assertThat(validation.validationErrors().get(0), containsString("one of [realm, issuer] must be provided"));
+
+        final OpenIdConnectPrepareAuthenticationRequest request2 = new OpenIdConnectPrepareAuthenticationRequest();
+        request2.setRealmName("oidc-realm1");
+        request2.setIssuer("https://op.company.org/");
+        final ActionRequestValidationException validation2 = request2.validate();
+        assertNotNull(validation2);
+        assertThat(validation2.validationErrors().size(), equalTo(1));
+        assertThat(validation2.validationErrors().get(0),
+            containsString("only one of [realm, issuer] can be provided in the same request"));
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java
new file mode 100644
index 00000000000..ddf17421099
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.oidc;
+
+import com.nimbusds.jwt.JWT;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkAction;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetRequestBuilder;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.test.ClusterServiceUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.watcher.ResourceWatcherService;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutRequest;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutResponse;
+import org.elasticsearch.xpack.core.security.authc.Authentication;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.core.ssl.SSLService;
+import org.elasticsearch.xpack.security.authc.Realms;
+import org.elasticsearch.xpack.security.authc.TokenService;
+import org.elasticsearch.xpack.security.authc.UserToken;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectTestCase;
+import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
+import org.elasticsearch.xpack.security.support.SecurityIndexManager;
+import org.junit.After;
+import org.junit.Before;
+
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import static org.elasticsearch.xpack.security.authc.TokenServiceTests.mockGetTokenFromId;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.startsWith;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TransportOpenIdConnectLogoutActionTests extends OpenIdConnectTestCase {
+
+    private OpenIdConnectRealm oidcRealm;
+    private TokenService tokenService;
+    private List<IndexRequest> indexRequests;
+    private List<BulkRequest> bulkRequests;
+    private Client client;
+    private TransportOpenIdConnectLogoutAction action;
+
+    @Before
+    public void setup() throws Exception {
+        final Settings settings = getBasicRealmSettings()
+            .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true)
+            .put("path.home", createTempDir())
+            .build();
+        final Settings sslSettings = Settings.builder()
+            .put("xpack.security.authc.realms.oidc.oidc-realm.ssl.verification_mode", "certificate")
+            .put("path.home", createTempDir())
+            .build();
+        final ThreadContext threadContext = new ThreadContext(settings);
+        final ThreadPool threadPool = mock(ThreadPool.class);
+        when(threadPool.getThreadContext()).thenReturn(threadContext);
+        new Authentication(new User("kibana"), new Authentication.RealmRef("realm", "type", "node"), null).writeToContext(threadContext);
+        indexRequests = new ArrayList<>();
+        bulkRequests = new ArrayList<>();
+        client = mock(Client.class);
+        when(client.threadPool()).thenReturn(threadPool);
+        when(client.settings()).thenReturn(settings);
+        doAnswer(invocationOnMock -> {
+            GetRequestBuilder builder = new GetRequestBuilder(client, GetAction.INSTANCE);
+            builder.setIndex((String) invocationOnMock.getArguments()[0])
+                .setType((String) invocationOnMock.getArguments()[1])
+                .setId((String) invocationOnMock.getArguments()[2]);
+            return builder;
+        }).when(client).prepareGet(anyString(), anyString(), anyString());
+        doAnswer(invocationOnMock -> {
+            IndexRequestBuilder builder = new IndexRequestBuilder(client, IndexAction.INSTANCE);
+            builder.setIndex((String) invocationOnMock.getArguments()[0])
+                .setType((String) invocationOnMock.getArguments()[1])
+                .setId((String) invocationOnMock.getArguments()[2]);
+            return builder;
+        }).when(client).prepareIndex(anyString(), anyString(), anyString());
+        doAnswer(invocationOnMock -> {
+            UpdateRequestBuilder builder = new UpdateRequestBuilder(client, UpdateAction.INSTANCE);
+            builder.setIndex((String) invocationOnMock.getArguments()[0])
+                .setType((String) invocationOnMock.getArguments()[1])
+                .setId((String) invocationOnMock.getArguments()[2]);
+            return builder;
+        }).when(client).prepareUpdate(anyString(), anyString(), anyString());
+        doAnswer(invocationOnMock -> {
+            BulkRequestBuilder builder = new BulkRequestBuilder(client, BulkAction.INSTANCE);
+            return builder;
+        }).when(client).prepareBulk();
+        doAnswer(invocationOnMock -> {
+            IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[0];
+            ActionListener<IndexResponse> listener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[1];
+            indexRequests.add(indexRequest);
+            final IndexResponse response = new IndexResponse(
+                indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true);
+            listener.onResponse(response);
+            return Void.TYPE;
+        }).when(client).index(any(IndexRequest.class), any(ActionListener.class));
+        doAnswer(invocationOnMock -> {
+            IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[1];
+            ActionListener<IndexResponse> listener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[2];
+            indexRequests.add(indexRequest);
+            final IndexResponse response = new IndexResponse(
+                indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true);
+            listener.onResponse(response);
+            return Void.TYPE;
+        }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class));
+        doAnswer(invocationOnMock -> {
+            BulkRequest bulkRequest = (BulkRequest) invocationOnMock.getArguments()[0];
+            ActionListener<BulkResponse> listener = (ActionListener<BulkResponse>) invocationOnMock.getArguments()[1];
+            bulkRequests.add(bulkRequest);
+            final BulkResponse response = new BulkResponse(new BulkItemResponse[0], 1);
+            listener.onResponse(response);
+            return Void.TYPE;
+        }).when(client).bulk(any(BulkRequest.class), any(ActionListener.class));
+
+        final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class);
+        doAnswer(inv -> {
+            ((Runnable) inv.getArguments()[1]).run();
+            return null;
+        }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class));
+        doAnswer(inv -> {
+            ((Runnable) inv.getArguments()[1]).run();
+            return null;
+        }).when(securityIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class));
+        when(securityIndex.isAvailable()).thenReturn(true);
+
+        final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
+        tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService);
+
+        final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
+        final Realms realms = mock(Realms.class);
+        action = new TransportOpenIdConnectLogoutAction(transportService, mock(ActionFilters.class), realms, tokenService);
+
+        final Environment env = TestEnvironment.newEnvironment(settings);
+
+        final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("oidc", REALM_NAME);
+
+        final RealmConfig realmConfig = new RealmConfig(realmIdentifier, settings, env, threadContext);
+        oidcRealm = new OpenIdConnectRealm(realmConfig, new SSLService(sslSettings, env), mock(UserRoleMapper.class),
+            mock(ResourceWatcherService.class));
+        when(realms.realm(realmConfig.name())).thenReturn(oidcRealm);
+    }
+
+    public void testLogoutInvalidatesTokens() throws Exception {
+        final String subject = randomAlphaOfLength(8);
+        final JWT signedIdToken = generateIdToken(subject, randomAlphaOfLength(8), randomAlphaOfLength(8));
+        final User user = new User("oidc-user", new String[]{"superuser"}, null, null, null, true);
+        final Authentication.RealmRef realmRef = new Authentication.RealmRef(oidcRealm.name(), OpenIdConnectRealmSettings.TYPE, "node01");
+        final Authentication authentication = new Authentication(user, realmRef, null);
+
+        final Map<String, Object> tokenMetadata = new HashMap<>();
+        tokenMetadata.put("id_token_hint", signedIdToken.serialize());
+        tokenMetadata.put("oidc_realm", REALM_NAME);
+
+        final PlainActionFuture<Tuple<UserToken, String>> future = new PlainActionFuture<>();
+        tokenService.createOAuth2Tokens(authentication, authentication, tokenMetadata, true, future);
+        final UserToken userToken = future.actionGet().v1();
+        mockGetTokenFromId(userToken, false, client);
+        final String tokenString = tokenService.getAccessTokenAsString(userToken);
+
+        final OpenIdConnectLogoutRequest request = new OpenIdConnectLogoutRequest();
+        request.setToken(tokenString);
+
+        final PlainActionFuture<OpenIdConnectLogoutResponse> listener = new PlainActionFuture<>();
+        action.doExecute(mock(Task.class), request, listener);
+        final OpenIdConnectLogoutResponse response = listener.get();
+        assertNotNull(response);
+        assertThat(response.getEndSessionUrl(), notNullValue());
+        // One index request to create the token
+        assertThat(indexRequests.size(), equalTo(1));
+        final IndexRequest indexRequest = indexRequests.get(0);
+        assertThat(indexRequest, notNullValue());
+        assertThat(indexRequest.id(), startsWith("token_"));
+        // One bulk request (containing one update request) to invalidate the token
+        assertThat(bulkRequests.size(), equalTo(1));
+        final BulkRequest bulkRequest = bulkRequests.get(0);
+        assertThat(bulkRequest.requests().size(), equalTo(1));
+        assertThat(bulkRequest.requests().get(0), instanceOf(UpdateRequest.class));
+        assertThat(bulkRequest.requests().get(0).id(), startsWith("token_"));
+        assertThat(bulkRequest.requests().get(0).toString(), containsString("\"access_token\":{\"invalidated\":true"));
+    }
+
+    @After
+    public void cleanup() {
+        oidcRealm.close();
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java
index f9007583c2c..e3298e51037 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java
@@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
@@ -61,7 +62,7 @@ public class InternalRealmsTests extends ESTestCase {
         String type = randomFrom(NativeRealmSettings.TYPE, FileRealmSettings.TYPE, LdapRealmSettings.AD_TYPE, LdapRealmSettings.LDAP_TYPE,
                 PkiRealmSettings.TYPE);
         assertThat(InternalRealms.isStandardRealm(type), is(true));
-        type = randomFrom(SamlRealmSettings.TYPE, KerberosRealmSettings.TYPE);
+        type = randomFrom(SamlRealmSettings.TYPE, KerberosRealmSettings.TYPE, OpenIdConnectRealmSettings.TYPE);
         assertThat(InternalRealms.isStandardRealm(type), is(false));
     }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java
index 202467147cd..3bc89d29f8d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java
@@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.security.authc.kerberos.KerberosRealmTestCase;
+import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectTestCase;
 import org.elasticsearch.xpack.security.authc.saml.SamlRealmTestHelper;
 import org.hamcrest.Matchers;
 import org.junit.AfterClass;
@@ -48,6 +49,9 @@ public class SecurityRealmSettingsTests extends SecurityIntegTestCase {
             final Path kerbKeyTab = createTempFile("es", "keytab");
             KerberosRealmTestCase.writeKeyTab(kerbKeyTab, null);
 
+            final Path jwkSet = createTempFile("jwkset", "json");
+            OpenIdConnectTestCase.writeJwkSetToFile(jwkSet);
+
             settings = Settings.builder()
                 .put(super.nodeSettings(nodeOrdinal).filter(s -> s.startsWith("xpack.security.authc.realms.") == false))
                 .put("xpack.security.authc.token.enabled", true)
@@ -67,6 +71,16 @@ public class SecurityRealmSettingsTests extends SecurityIntegTestCase {
                 .put("xpack.security.authc.realms.saml.saml1.attributes.principal", "uid")
                 .put("xpack.security.authc.realms.kerberos.kerb1.order", 7)
                 .put("xpack.security.authc.realms.kerberos.kerb1.keytab.path", kerbKeyTab.toAbsolutePath())
+                .put("xpack.security.authc.realms.oidc.oidc1.order", 8)
+                .put("xpack.security.authc.realms.oidc.oidc1.op.name", "myprovider")
+                .put("xpack.security.authc.realms.oidc.oidc1.op.issuer", "https://the.issuer.com:8090")
+                .put("xpack.security.authc.realms.oidc.oidc1.op.jwkset_path", jwkSet.toAbsolutePath())
+                .put("xpack.security.authc.realms.oidc.oidc1.op.authorization_endpoint", "https://the.issuer.com:8090/login")
+                .put("xpack.security.authc.realms.oidc.oidc1.op.token_endpoint", "https://the.issuer.com:8090/token")
+                .put("xpack.security.authc.realms.oidc.oidc1.rp.redirect_uri", "https://localhost/cb")
+                .put("xpack.security.authc.realms.oidc.oidc1.rp.client_id", "my_client")
+                .put("xpack.security.authc.realms.oidc.oidc1.rp.response_type", "code")
+                .put("xpack.security.authc.realms.oidc.oidc1.claims.principal", "sub")
                 .build();
         } catch (IOException e) {
             throw new RuntimeException(e);
@@ -84,7 +98,7 @@ public class SecurityRealmSettingsTests extends SecurityIntegTestCase {
     }
 
     /**
-     * Some realms (currently only SAML, but maybe more in the future) hold on to resources that may need to be explicitly closed.
+     * Some realms (SAML and OIDC at the moment) hold on to resources that may need to be explicitly closed.
      */
     @AfterClass
     public static void closeRealms() throws IOException {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java
new file mode 100644
index 00000000000..e7fdbfe558a
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java
@@ -0,0 +1,808 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.jose.JWSAlgorithm;
+import com.nimbusds.jose.JWSHeader;
+import com.nimbusds.jose.crypto.ECDSASigner;
+import com.nimbusds.jose.crypto.MACSigner;
+import com.nimbusds.jose.crypto.RSASSASigner;
+import com.nimbusds.jose.jwk.ECKey;
+import com.nimbusds.jose.jwk.JWK;
+import com.nimbusds.jose.jwk.JWKSet;
+import com.nimbusds.jose.jwk.KeyUse;
+import com.nimbusds.jose.jwk.OctetSequenceKey;
+import com.nimbusds.jose.jwk.RSAKey;
+import com.nimbusds.jose.proc.BadJOSEException;
+import com.nimbusds.jose.proc.BadJWSException;
+import com.nimbusds.jose.proc.JWSVerificationKeySelector;
+import com.nimbusds.jwt.JWT;
+import com.nimbusds.jwt.JWTClaimsSet;
+import com.nimbusds.jwt.PlainJWT;
+import com.nimbusds.jwt.SignedJWT;
+import com.nimbusds.jwt.proc.BadJWTException;
+import com.nimbusds.oauth2.sdk.ResponseType;
+import com.nimbusds.oauth2.sdk.Scope;
+import com.nimbusds.oauth2.sdk.auth.Secret;
+import com.nimbusds.oauth2.sdk.id.ClientID;
+import com.nimbusds.oauth2.sdk.id.Issuer;
+import com.nimbusds.oauth2.sdk.id.State;
+import com.nimbusds.oauth2.sdk.token.AccessToken;
+import com.nimbusds.oauth2.sdk.token.BearerAccessToken;
+import com.nimbusds.openid.connect.sdk.AuthenticationSuccessResponse;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import com.nimbusds.openid.connect.sdk.claims.AccessTokenHash;
+import com.nimbusds.openid.connect.sdk.validators.IDTokenValidator;
+import com.nimbusds.openid.connect.sdk.validators.InvalidHashException;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.ssl.SSLService;
+import org.junit.After;
+import org.junit.Before;
+import org.mockito.Mockito;
+
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.Key;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.PrivateKey;
+import java.security.interfaces.ECPrivateKey;
+import java.security.interfaces.ECPublicKey;
+import java.security.interfaces.RSAPrivateKey;
+import java.security.interfaces.RSAPublicKey;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.Date;
+import java.util.UUID;
+
+import static java.time.Instant.now;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class OpenIdConnectAuthenticatorTests extends OpenIdConnectTestCase {
+
+    private OpenIdConnectAuthenticator authenticator;
+    private Settings globalSettings;
+    private Environment env;
+    private ThreadContext threadContext;
+
+    @Before
+    public void setup() {
+        globalSettings = Settings.builder().put("path.home", createTempDir())
+            .put("xpack.security.authc.realms.oidc.oidc-realm.ssl.verification_mode", "certificate").build();
+        env = TestEnvironment.newEnvironment(globalSettings);
+        threadContext = new ThreadContext(globalSettings);
+    }
+
+    @After
+    public void cleanup() {
+        authenticator.close();
+    }
+
+    private OpenIdConnectAuthenticator buildAuthenticator() throws URISyntaxException {
+        final RealmConfig config = buildConfig(getBasicRealmSettings().build(), threadContext);
+        return new OpenIdConnectAuthenticator(config, getOpConfig(), getDefaultRpConfig(), new SSLService(globalSettings, env), null);
+    }
+
+    private OpenIdConnectAuthenticator buildAuthenticator(OpenIdConnectProviderConfiguration opConfig, RelyingPartyConfiguration rpConfig,
+                                                          OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource) {
+        final RealmConfig config = buildConfig(getBasicRealmSettings().build(), threadContext);
+        final JWSVerificationKeySelector keySelector = new JWSVerificationKeySelector(rpConfig.getSignatureAlgorithm(), jwkSource);
+        final IDTokenValidator validator = new IDTokenValidator(opConfig.getIssuer(), rpConfig.getClientId(), keySelector, null);
+        return new OpenIdConnectAuthenticator(config, opConfig, rpConfig, new SSLService(globalSettings, env), validator,
+            null);
+    }
+
+    private OpenIdConnectAuthenticator buildAuthenticator(OpenIdConnectProviderConfiguration opConfig,
+                                                          RelyingPartyConfiguration rpConfig) {
+        final RealmConfig config = buildConfig(getBasicRealmSettings().build(), threadContext);
+        final IDTokenValidator validator = new IDTokenValidator(opConfig.getIssuer(), rpConfig.getClientId(),
+            rpConfig.getSignatureAlgorithm(), new Secret(rpConfig.getClientSecret().toString()));
+        return new OpenIdConnectAuthenticator(config, opConfig, rpConfig, new SSLService(globalSettings, env), validator,
+            null);
+    }
+
+    public void testEmptyRedirectUrlIsRejected() throws Exception {
+        authenticator = buildAuthenticator();
+        OpenIdConnectToken token = new OpenIdConnectToken(null, new State(), new Nonce());
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to consume the OpenID connect response"));
+    }
+
+    public void testInvalidStateIsRejected() throws URISyntaxException {
+        authenticator = buildAuthenticator();
+        final String code = randomAlphaOfLengthBetween(8, 12);
+        final String state = randomAlphaOfLengthBetween(8, 12);
+        final String invalidState = state.concat(randomAlphaOfLength(2));
+        final String redirectUrl = "https://rp.elastic.co/cb?code=" + code + "&state=" + state;
+        OpenIdConnectToken token = new OpenIdConnectToken(redirectUrl, new State(invalidState), new Nonce());
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Invalid state parameter"));
+    }
+
+    public void testInvalidNonceIsRejected() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final Nonce invalidNonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        final Tuple<AccessToken, JWT> tokens = buildTokens(invalidNonce, key, jwk.getAlgorithm().getName(), keyId, subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWTException.class));
+        assertThat(e.getCause().getMessage(), containsString("Unexpected JWT nonce"));
+    }
+
+    public void testAuthenticateImplicitFlowWithRsa() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("RS");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+        authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), jwk.getKeyID(), subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        JWTClaimsSet claimsSet = future.actionGet();
+        assertThat(claimsSet.getSubject(), equalTo(subject));
+    }
+
+    public void testAuthenticateImplicitFlowWithEcdsa() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("ES");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+        authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), jwk.getKeyID(), subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        JWTClaimsSet claimsSet = future.actionGet();
+        assertThat(claimsSet.getSubject(), equalTo(subject));
+    }
+
+    public void testAuthenticateImplicitFlowWithHmac() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("HS");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        authenticator = buildAuthenticator(opConfig, rpConfig);
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), null, subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        JWTClaimsSet claimsSet = future.actionGet();
+        assertThat(claimsSet.getSubject(), equalTo(subject));
+    }
+
+    public void testClockSkewIsHonored() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(rpConfig.getClientId().getValue())
+            // Expired 55 seconds ago with an allowed clock skew of 60 seconds
+            .expirationTime(Date.from(now().minusSeconds(55)))
+            .issuer(opConfig.getIssuer().getValue())
+            .issueTime(Date.from(now().minusSeconds(200)))
+            .notBeforeTime(Date.from(now().minusSeconds(200)))
+            .claim("nonce", nonce)
+            .subject(subject);
+        final Tuple<AccessToken, JWT> tokens = buildTokens(idTokenBuilder.build(), key, jwk.getAlgorithm().getName(), keyId, subject,
+            true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        JWTClaimsSet claimsSet = future.actionGet();
+        assertThat(claimsSet.getSubject(), equalTo(subject));
+    }
+
+    public void testImplicitFlowFailsWithExpiredToken() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(rpConfig.getClientId().getValue())
+            // Expired 65 seconds ago with an allowed clock skew of 60 seconds
+            .expirationTime(Date.from(now().minusSeconds(65)))
+            .issuer(opConfig.getIssuer().getValue())
+            .issueTime(Date.from(now().minusSeconds(200)))
+            .notBeforeTime(Date.from(now().minusSeconds(200)))
+            .claim("nonce", nonce)
+            .subject(subject);
+        final Tuple<AccessToken, JWT> tokens = buildTokens(idTokenBuilder.build(), key, jwk.getAlgorithm().getName(), keyId,
+            subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWTException.class));
+        assertThat(e.getCause().getMessage(), containsString("Expired JWT"));
+    }
+
+    public void testImplicitFlowFailsNotYetIssuedToken() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(rpConfig.getClientId().getValue())
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issuer(opConfig.getIssuer().getValue())
+            // Issued 80 seconds in the future with max allowed clock skew of 60
+            .issueTime(Date.from(now().plusSeconds(80)))
+            .notBeforeTime(Date.from(now().minusSeconds(80)))
+            .claim("nonce", nonce)
+            .subject(subject);
+        final Tuple<AccessToken, JWT> tokens = buildTokens(idTokenBuilder.build(), key, jwk.getAlgorithm().getName(), keyId,
+            subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWTException.class));
+        assertThat(e.getCause().getMessage(), containsString("JWT issue time ahead of current time"));
+    }
+
+    public void testImplicitFlowFailsInvalidIssuer() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(rpConfig.getClientId().getValue())
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issuer("https://another.op.org")
+            .issueTime(Date.from(now().minusSeconds(200)))
+            .notBeforeTime(Date.from(now().minusSeconds(200)))
+            .claim("nonce", nonce)
+            .subject(subject);
+        final Tuple<AccessToken, JWT> tokens = buildTokens(idTokenBuilder.build(), key, jwk.getAlgorithm().getName(), keyId,
+            subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWTException.class));
+        assertThat(e.getCause().getMessage(), containsString("Unexpected JWT issuer"));
+    }
+
+    public void testImplicitFlowFailsInvalidAudience() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience("some-other-RP")
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issuer(opConfig.getIssuer().getValue())
+            .issueTime(Date.from(now().minusSeconds(200)))
+            .notBeforeTime(Date.from(now().minusSeconds(80)))
+            .claim("nonce", nonce)
+            .subject(subject);
+        final Tuple<AccessToken, JWT> tokens = buildTokens(idTokenBuilder.build(), key, jwk.getAlgorithm().getName(), keyId,
+            subject, true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWTException.class));
+        assertThat(e.getCause().getMessage(), containsString("Unexpected JWT audience"));
+    }
+
+    public void testAuthenticateImplicitFlowFailsWithForgedRsaIdToken() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("RS");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+        authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), jwk.getKeyID(), subject, true, true);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWSException.class));
+        assertThat(e.getCause().getMessage(), containsString("Signed JWT rejected: Invalid signature"));
+    }
+
+    public void testAuthenticateImplicitFlowFailsWithForgedEcsdsaIdToken() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("ES");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+        authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), jwk.getKeyID(), subject, true, true);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWSException.class));
+        assertThat(e.getCause().getMessage(), containsString("Signed JWT rejected: Invalid signature"));
+    }
+
+    public void testAuthenticateImplicitFlowFailsWithForgedHmacIdToken() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("HS");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        authenticator = buildAuthenticator(opConfig, rpConfig);
+
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), null, subject, true, true);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWSException.class));
+        assertThat(e.getCause().getMessage(), containsString("Signed JWT rejected: Invalid signature"));
+    }
+
+    public void testAuthenticateImplicitFlowFailsWithForgedAccessToken() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), keyId, subject, true, false);
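+        // Return a different access token than the one whose hash was embedded in the ID token's at_hash claim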
+        final String responseUrl = buildAuthResponse(tokens.v2(), new BearerAccessToken("someforgedAccessToken"), state,
+            rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to verify access token"));
+        assertThat(e.getCause(), instanceOf(InvalidHashException.class));
+        assertThat(e.getCause().getMessage(), containsString("Access token hash (at_hash) mismatch"));
+    }
+
+    public void testImplicitFlowFailsWithNoneAlgorithm() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType("HS");
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        final Key key = keyMaterial.v1();
+        RelyingPartyConfiguration rpConfig = getRpConfigNoAccessToken(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        final String keyId = (jwk.getAlgorithm().getName().startsWith("HS")) ? null : jwk.getKeyID();
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, key, jwk.getAlgorithm().getName(), keyId, subject, false, false);
+        JWT idToken = tokens.v2();
+        // Change the algorithm of the signed JWT to NONE
+        String[] serializedParts = idToken.serialize().split("\\.");
+        String legitimateHeader = new String(Base64.getUrlDecoder().decode(serializedParts[0]), StandardCharsets.UTF_8);
+        String forgedHeader = legitimateHeader.replace(jwk.getAlgorithm().getName(), "NONE");
+        String encodedForgedHeader =
+            Base64.getUrlEncoder().withoutPadding().encodeToString(forgedHeader.getBytes(StandardCharsets.UTF_8));
+        String forgedTokenString = encodedForgedHeader + "." + serializedParts[1] + "." + serializedParts[2];
+        idToken = SignedJWT.parse(forgedTokenString);
+        final String responseUrl = buildAuthResponse(idToken, tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJOSEException.class));
+        assertThat(e.getCause().getMessage(), containsString("Another algorithm expected, or no matching key(s) found"));
+    }
+
+    /**
+     * The premise of this attack is that an RP that expects a JWT signed with an asymmetric algorithm (RSA, ECDSA)
+     * receives a JWT signed with an HMAC. Trusting the received JWT's alg claim more than its own configuration,
+     * it attempts to validate the HMAC using the provider's {RSA,EC} public key as a secret key.
+     */
+    public void testImplicitFlowFailsWithAlgorithmMixupAttack() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+        authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
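+        // Sign the ID token with an HMAC secret although the RP is configured to expect an asymmetric (ES/RS) algorithm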
+        SecretKeySpec hmacKey = new SecretKeySpec("thisismysupersupersupersupersupersuperlongsecret".getBytes(StandardCharsets.UTF_8),
+            "HmacSha384");
+        final Tuple<AccessToken, JWT> tokens = buildTokens(nonce, hmacKey, "HS384", null, subject,
+            true, false);
+        final String responseUrl = buildAuthResponse(tokens.v2(), tokens.v1(), state, rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJOSEException.class));
+        assertThat(e.getCause().getMessage(), containsString("Another algorithm expected, or no matching key(s) found"));
+    }
+
+    public void testImplicitFlowFailsWithUnsignedJwt() throws Exception {
+        final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("HS", "ES", "RS"));
+        final JWK jwk = keyMaterial.v2().getKeys().get(0);
+        RelyingPartyConfiguration rpConfig = getRpConfigNoAccessToken(jwk.getAlgorithm().getName());
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        if (jwk.getAlgorithm().getName().startsWith("HS")) {
+            authenticator = buildAuthenticator(opConfig, rpConfig);
+        } else {
+            OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource = mockSource(jwk);
+            authenticator = buildAuthenticator(opConfig, rpConfig, jwkSource);
+        }
+        final State state = new State();
+        final Nonce nonce = new Nonce();
+        final String subject = "janedoe";
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(rpConfig.getClientId().getValue())
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issuer(opConfig.getIssuer().getValue())
+            .issueTime(Date.from(now().minusSeconds(200)))
+            .notBeforeTime(Date.from(now().minusSeconds(200)))
+            .claim("nonce", nonce)
+            .subject(subject);
+
+        final String responseUrl = buildAuthResponse(new PlainJWT(idTokenBuilder.build()), null, state,
+            rpConfig.getRedirectUri());
+        final OpenIdConnectToken token = new OpenIdConnectToken(responseUrl, state, nonce);
+        final PlainActionFuture<JWTClaimsSet> future = new PlainActionFuture<>();
+        authenticator.authenticate(token, future);
+        ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class,
+            future::actionGet);
+        assertThat(e.getMessage(), containsString("Failed to parse or validate the ID Token"));
+        assertThat(e.getCause(), instanceOf(BadJWTException.class));
+        assertThat(e.getCause().getMessage(), containsString("Signed ID token expected"));
+    }
+
+    private OpenIdConnectProviderConfiguration getOpConfig() throws URISyntaxException {
+        return new OpenIdConnectProviderConfiguration("op_name",
+            new Issuer("https://op.example.com"),
+            "https://op.example.org/jwks.json",
+            new URI("https://op.example.org/login"),
+            new URI("https://op.example.org/token"),
+            null,
+            new URI("https://op.example.org/logout"));
+    }
+
+    private RelyingPartyConfiguration getDefaultRpConfig() throws URISyntaxException {
+        return new RelyingPartyConfiguration(
+            new ClientID("rp-my"),
+            new SecureString("thisismysupersupersupersupersupersuperlongsecret".toCharArray()),
+            new URI("https://rp.elastic.co/cb"),
+            new ResponseType("id_token", "token"),
+            new Scope("openid"),
+            JWSAlgorithm.RS384,
+            new URI("https://rp.elastic.co/successfull_logout"));
+    }
+
+    private RelyingPartyConfiguration getRpConfig(String alg) throws URISyntaxException {
+        return new RelyingPartyConfiguration(
+            new ClientID("rp-my"),
+            new SecureString("thisismysupersupersupersupersupersuperlongsecret".toCharArray()),
+            new URI("https://rp.elastic.co/cb"),
+            new ResponseType("id_token", "token"),
+            new Scope("openid"),
+            JWSAlgorithm.parse(alg),
+            new URI("https://rp.elastic.co/successfull_logout"));
+    }
+
+    private RelyingPartyConfiguration getRpConfigNoAccessToken(String alg) throws URISyntaxException {
+        return new RelyingPartyConfiguration(
+            new ClientID("rp-my"),
+            new SecureString("thisismysupersupersupersupersupersuperlongsecret".toCharArray()),
+            new URI("https://rp.elastic.co/cb"),
+            new ResponseType("id_token"),
+            new Scope("openid"),
+            JWSAlgorithm.parse(alg),
+            new URI("https://rp.elastic.co/successfull_logout"));
+    }
+
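+    // Builds the redirect URI of an implicit flow authentication response carrying the ID token, access token and state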
+    private String buildAuthResponse(JWT idToken, @Nullable AccessToken accessToken, State state, URI redirectUri) {
+        AuthenticationSuccessResponse response = new AuthenticationSuccessResponse(
+            redirectUri,
+            null,
+            idToken,
+            accessToken,
+            state,
+            null,
+            null);
+        return response.toURI().toString();
+    }
+
+    private OpenIdConnectAuthenticator.ReloadableJWKSource mockSource(JWK jwk) {
+        OpenIdConnectAuthenticator.ReloadableJWKSource jwkSource =
+            mock(OpenIdConnectAuthenticator.ReloadableJWKSource.class);
+        when(jwkSource.get(any(), any())).thenReturn(Collections.singletonList(jwk));
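+        // Complete any reload request immediately so tests never block on a remote JWKS fetch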
+        Mockito.doAnswer(invocation -> {
+            @SuppressWarnings("unchecked")
+            ActionListener<Void> listener = (ActionListener<Void>) invocation.getArguments()[0];
+            listener.onResponse(null);
+            return null;
+        }).when(jwkSource).triggerReload(any(ActionListener.class));
+        return jwkSource;
+    }
+
+    private Tuple<AccessToken, JWT> buildTokens(JWTClaimsSet idToken, Key key, String alg, String keyId,
+                                                String subject, boolean withAccessToken, boolean forged) throws Exception {
+        AccessToken accessToken = null;
+        if (withAccessToken) {
+            accessToken = new BearerAccessToken(Base64.getUrlEncoder().encodeToString(randomByteArrayOfLength(32)));
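+            // Bind the ID token to the access token by adding the matching at_hash claim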
+            AccessTokenHash expectedHash = AccessTokenHash.compute(accessToken, JWSAlgorithm.parse(alg));
+            idToken = JWTClaimsSet.parse(idToken.toJSONObject().appendField("at_hash", expectedHash.getValue()));
+        }
+        SignedJWT jwt = new SignedJWT(
+            new JWSHeader.Builder(JWSAlgorithm.parse(alg)).keyID(keyId).build(),
+            idToken);
+
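+        // Pick the signer that matches the provided key type (RSA, EC or HMAC)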
+        if (key instanceof RSAPrivateKey) {
+            jwt.sign(new RSASSASigner((PrivateKey) key));
+        } else if (key instanceof ECPrivateKey) {
+            jwt.sign(new ECDSASigner((ECPrivateKey) key));
+        } else if (key instanceof SecretKey) {
+            jwt.sign(new MACSigner((SecretKey) key));
+        }
+        if (forged) {
+            // Change the sub claim to "attacker"
+            String[] serializedParts = jwt.serialize().split("\\.");
+            String legitimatePayload = new String(Base64.getUrlDecoder().decode(serializedParts[1]), StandardCharsets.UTF_8);
+            String forgedPayload = legitimatePayload.replace(subject, "attacker");
+            String encodedForgedPayload =
+                Base64.getUrlEncoder().withoutPadding().encodeToString(forgedPayload.getBytes(StandardCharsets.UTF_8));
+            String forgedTokenString = serializedParts[0] + "." + encodedForgedPayload + "." + serializedParts[2];
+            jwt = SignedJWT.parse(forgedTokenString);
+        }
+        return new Tuple<>(accessToken, jwt);
+    }
+
+    private Tuple<AccessToken, JWT> buildTokens(Nonce nonce, Key key, String alg, String keyId, String subject, boolean withAccessToken,
+                                                boolean forged) throws Exception {
+        RelyingPartyConfiguration rpConfig = getRpConfig(alg);
+        OpenIdConnectProviderConfiguration opConfig = getOpConfig();
+        JWTClaimsSet.Builder idTokenBuilder = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(rpConfig.getClientId().getValue())
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issuer(opConfig.getIssuer().getValue())
+            .issueTime(Date.from(now().minusSeconds(4)))
+            .notBeforeTime(Date.from(now().minusSeconds(4)))
+            .claim("nonce", nonce)
+            .subject(subject);
+
+        return buildTokens(idTokenBuilder.build(), key, alg, keyId, subject, withAccessToken, forged);
+    }
+
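+    // Generates random key material and the corresponding JWK for the requested algorithm family ("RS", "HS" or "ES")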
+    private Tuple<Key, JWKSet> getRandomJwkForType(String type) throws Exception {
+        JWK jwk;
+        Key key;
+        int hashSize;
+        if (type.equals("RS")) {
+            hashSize = randomFrom(256, 384, 512);
+            int keySize = randomFrom(2048, 4096);
+            KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
+            gen.initialize(keySize);
+            KeyPair keyPair = gen.generateKeyPair();
+            key = keyPair.getPrivate();
+            jwk = new RSAKey.Builder((RSAPublicKey) keyPair.getPublic())
+                .privateKey((RSAPrivateKey) keyPair.getPrivate())
+                .keyUse(KeyUse.SIGNATURE)
+                .keyID(UUID.randomUUID().toString())
+                .algorithm(JWSAlgorithm.parse(type + hashSize))
+                .build();
+
+        } else if (type.equals("HS")) {
+            hashSize = randomFrom(256, 384);
+            SecretKeySpec hmacKey = new SecretKeySpec("thisismysupersupersupersupersupersuperlongsecret".getBytes(StandardCharsets.UTF_8),
+                "HmacSha" + hashSize);
+            //SecretKey hmacKey = KeyGenerator.getInstance("HmacSha" + hashSize).generateKey();
+            key = hmacKey;
+            jwk = new OctetSequenceKey.Builder(hmacKey)
+                .keyID(UUID.randomUUID().toString())
+                .algorithm(JWSAlgorithm.parse(type + hashSize))
+                .build();
+
+        } else if (type.equals("ES")) {
+            hashSize = randomFrom(256, 384, 512);
+            ECKey.Curve curve = curveFromHashSize(hashSize);
+            KeyPairGenerator gen = KeyPairGenerator.getInstance("EC");
+            gen.initialize(curve.toECParameterSpec());
+            KeyPair keyPair = gen.generateKeyPair();
+            key = keyPair.getPrivate();
+            jwk = new ECKey.Builder(curve, (ECPublicKey) keyPair.getPublic())
+                .privateKey((ECPrivateKey) keyPair.getPrivate())
+                .algorithm(JWSAlgorithm.parse(type + hashSize))
+                .build();
+        } else {
+            throw new IllegalArgumentException("Invalid key type: " + type);
+        }
+        return new Tuple<>(key, new JWKSet(jwk));
+    }
+
+    private ECKey.Curve curveFromHashSize(int size) {
+        if (size == 256) {
+            return ECKey.Curve.P_256;
+        } else if (size == 384) {
+            return ECKey.Curve.P_384;
+        } else if (size == 512) {
+            return ECKey.Curve.P_521;
+        } else {
+            throw new IllegalArgumentException("Invalid hash size: " + size);
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java
new file mode 100644
index 00000000000..cd92168b3aa
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java
@@ -0,0 +1,256 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
+
+public class OpenIdConnectRealmSettingsTests extends ESTestCase {
+
+    private static final String REALM_NAME = "oidc1-realm";
+    private ThreadContext threadContext;
+
+    @Before
+    public void setupEnv() {
+        Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build();
+        threadContext = new ThreadContext(globalSettings);
+    }
+
+    public void testIncorrectResponseTypeThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "hybrid");
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(), Matchers.containsString(getFullSettingKey(REALM_NAME,
+            OpenIdConnectRealmSettings.RP_RESPONSE_TYPE)));
+    }
+
+    public void testMissingAuthorizationEndpointThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT)));
+    }
+
+    public void testInvalidAuthorizationEndpointThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "this is not a URI")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT)));
+    }
+
+    public void testMissingTokenEndpointThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT)));
+    }
+
+    public void testInvalidTokenEndpointThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "This is not a uri")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT)));
+    }
+
+    public void testMissingJwksUrlThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH)));
+    }
+
+    public void testMissingIssuerThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER)));
+    }
+
+    public void testMissingNameTypeThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME)));
+    }
+
+    public void testMissingRedirectUriThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI)));
+    }
+
+    public void testMissingClientIdThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID)));
+    }
+
+    public void testMissingPrincipalClaimThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+            .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
+                Arrays.asList("openid", "scope1", "scope2"));
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim())));
+    }
+
+    public void testPatternWithoutSettingThrowsError() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getPattern()), "^(.*)$")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+            .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
+                Arrays.asList("openid", "scope1", "scope2"));
+        SettingsException exception = expectThrows(SettingsException.class, () -> {
+            new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null);
+        });
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim())));
+        assertThat(exception.getMessage(),
+            Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getPattern())));
+    }
+
+    private RealmConfig buildConfig(Settings realmSettings) {
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir())
+            .put(realmSettings).build();
+        final Environment env = TestEnvironment.newEnvironment(settings);
+        return new RealmConfig(new RealmConfig.RealmIdentifier("oidc", REALM_NAME), settings, env, threadContext);
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java
new file mode 100644
index 00000000000..0d26c0b442c
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.jwt.JWT;
+import com.nimbusds.jwt.JWTClaimsSet;
+import com.nimbusds.oauth2.sdk.id.State;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.license.XPackLicenseState;
+
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectLogoutResponse;
+import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationResponse;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+import org.elasticsearch.xpack.core.security.authc.Realm;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
+import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.time.Instant.now;
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
+import static org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm.CONTEXT_TOKEN_DATA;
+import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class OpenIdConnectRealmTests extends OpenIdConnectTestCase {
+
+    private Settings globalSettings;
+    private Environment env;
+    private ThreadContext threadContext;
+
+    @Before
+    public void setupEnv() {
+        globalSettings = Settings.builder().put("path.home", createTempDir()).build();
+        env = TestEnvironment.newEnvironment(globalSettings);
+        threadContext = new ThreadContext(globalSettings);
+    }
+
+    public void testAuthentication() throws Exception {
+        final UserRoleMapper roleMapper = mock(UserRoleMapper.class);
+        AtomicReference<UserRoleMapper.UserData> userData = new AtomicReference<>();
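+        // Capture the UserData passed to the role mapper and answer with a fixed set of roles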
+        doAnswer(invocation -> {
+            assert invocation.getArguments().length == 2;
+            userData.set((UserRoleMapper.UserData) invocation.getArguments()[0]);
+            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
+            listener.onResponse(new HashSet<>(Arrays.asList("kibana_user", "role1")));
+            return null;
+        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
+
+        final boolean notPopulateMetadata = randomBoolean();
+
+        AuthenticationResult result = authenticateWithOidc(roleMapper, notPopulateMetadata, false);
+        assertThat(result.getUser().roles(), arrayContainingInAnyOrder("kibana_user", "role1"));
+        if (notPopulateMetadata == false) {
+            assertThat(result.getUser().metadata().get("oidc(iss)"), equalTo("https://op.company.org"));
+            assertThat(result.getUser().metadata().get("oidc(name)"), equalTo("Clinton Barton"));
+        }
+    }
+
+    public void testWithAuthorizingRealm() throws Exception {
+        final UserRoleMapper roleMapper = mock(UserRoleMapper.class);
+        doAnswer(invocation -> {
+            assert invocation.getArguments().length == 2;
+            ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1];
+            listener.onFailure(new RuntimeException("Role mapping should not be called"));
+            return null;
+        }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
+
+        AuthenticationResult result = authenticateWithOidc(roleMapper, randomBoolean(), true);
+        assertThat(result.getUser().roles(), arrayContainingInAnyOrder("lookup_user_role"));
+        assertThat(result.getUser().fullName(), equalTo("Clinton Barton"));
+        assertThat(result.getUser().metadata().entrySet(), Matchers.iterableWithSize(1));
+        assertThat(result.getUser().metadata().get("is_lookup"), Matchers.equalTo(true));
+        assertNotNull(result.getMetadata().get(CONTEXT_TOKEN_DATA));
+        assertThat(result.getMetadata().get(CONTEXT_TOKEN_DATA), instanceOf(Map.class));
+        Map<String, Object> tokenMetadata = (Map) result.getMetadata().get(CONTEXT_TOKEN_DATA);
+        assertThat(tokenMetadata.get("id_token_hint"), equalTo("thisis.aserialized.jwt"));
+    }
+
+    public void testClaimPatternParsing() throws Exception {
+        final Settings.Builder builder = getBasicRealmSettings();
+        builder.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getPattern()), "^OIDC-(.+)");
+        final RealmConfig config = buildConfig(builder.build(), threadContext);
+        final OpenIdConnectRealmSettings.ClaimSetting principalSetting = new OpenIdConnectRealmSettings.ClaimSetting("principal");
+        final OpenIdConnectRealm.ClaimParser parser = OpenIdConnectRealm.ClaimParser.forSetting(logger, principalSetting, config, true);
+        final JWTClaimsSet claims = new JWTClaimsSet.Builder()
+            .subject("OIDC-cbarton")
+            .audience("https://rp.elastic.co/cb")
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issueTime(Date.from(now().minusSeconds(5)))
+            .jwtID(randomAlphaOfLength(8))
+            .issuer("https://op.company.org")
+            .build();
+        assertThat(parser.getClaimValue(claims), equalTo("cbarton"));
+    }
+
+    public void testInvalidPrincipalClaimPatternParsing() {
+        final OpenIdConnectAuthenticator authenticator = mock(OpenIdConnectAuthenticator.class);
+        final OpenIdConnectToken token = new OpenIdConnectToken("", new State(), new Nonce());
+        final Settings.Builder builder = getBasicRealmSettings();
+        builder.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getPattern()), "^OIDC-(.+)");
+        final RealmConfig config = buildConfig(builder.build(), threadContext);
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(config, authenticator, null);
+        final JWTClaimsSet claims = new JWTClaimsSet.Builder()
+            .subject("cbarton@avengers.com")
+            .audience("https://rp.elastic.co/cb")
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issueTime(Date.from(now().minusSeconds(5)))
+            .jwtID(randomAlphaOfLength(8))
+            .issuer("https://op.company.org")
+            .build();
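+        // Return claims whose subject does not match the configured principal pattern, so no principal can be resolved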
+        doAnswer((i) -> {
+            ActionListener<JWTClaimsSet> listener = (ActionListener<JWTClaimsSet>) i.getArguments()[1];
+            listener.onResponse(claims);
+            return null;
+        }).when(authenticator).authenticate(any(OpenIdConnectToken.class), any(ActionListener.class));
+
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        realm.authenticate(token, future);
+        final AuthenticationResult result = future.actionGet();
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE));
+        assertThat(result.getMessage(), containsString("claims.principal"));
+        assertThat(result.getMessage(), containsString("sub"));
+        assertThat(result.getMessage(), containsString("^OIDC-(.+)"));
+    }
+
+    public void testBuildRelyingPartyConfigWithoutOpenIdScope() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+            .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
+                Arrays.asList("scope1", "scope2"));
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
+            null);
+        final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null);
+        final String state = response.getState();
+        final String nonce = response.getNonce();
+        assertThat(response.getAuthenticationRequestUrl(),
+            equalTo("https://op.example.com/login?scope=scope1+scope2+openid&response_type=code" +
+                "&redirect_uri=https%3A%2F%2Frp.my.com%2Fcb&state=" + state + "&nonce=" + nonce + "&client_id=rp-my"));
+    }
+
+    public void testBuildingAuthenticationRequest() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code")
+            .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES),
+                Arrays.asList("openid", "scope1", "scope2"));
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
+            null);
+        final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null);
+        final String state = response.getState();
+        final String nonce = response.getNonce();
+        assertThat(response.getAuthenticationRequestUrl(),
+            equalTo("https://op.example.com/login?scope=openid+scope1+scope2&response_type=code" +
+                "&redirect_uri=https%3A%2F%2Frp.my.com%2Fcb&state=" + state + "&nonce=" + nonce + "&client_id=rp-my"));
+    }
+
+    public void testBuildingAuthenticationRequestWithDefaultScope() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
+            null);
+        final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null);
+        final String state = response.getState();
+        final String nonce = response.getNonce();
+        assertThat(response.getAuthenticationRequestUrl(), equalTo("https://op.example.com/login?scope=openid&response_type=code" +
+            "&redirect_uri=https%3A%2F%2Frp.my.com%2Fcb&state=" + state + "&nonce=" + nonce + "&client_id=rp-my"));
+    }
+
+    public void testBuildLogoutResponse() throws Exception {
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(getBasicRealmSettings().build(), threadContext), null,
+            null);
+        // Random strings, as we will not validate the token here
+        final JWT idToken = generateIdToken(randomAlphaOfLength(8), randomAlphaOfLength(8), randomAlphaOfLength(8));
+        final OpenIdConnectLogoutResponse logoutResponse = realm.buildLogoutResponse(idToken);
+        assertThat(logoutResponse.getEndSessionUrl(), containsString("https://op.example.org/logout?id_token_hint="));
+        assertThat(logoutResponse.getEndSessionUrl(),
+            containsString("&post_logout_redirect_uri=https%3A%2F%2Frp.elastic.co%2Fsucc_logout&state="));
+    }
+
+    public void testBuildingAuthenticationRequestWithExistingStateAndNonce() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
+            null);
+        final String state = new State().getValue();
+        final String nonce = new Nonce().getValue();
+        final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(state, nonce, null);
+
+        assertThat(response.getAuthenticationRequestUrl(), equalTo("https://op.example.com/login?scope=openid&response_type=code" +
+            "&redirect_uri=https%3A%2F%2Frp.my.com%2Fcb&state=" + state + "&nonce=" + nonce + "&client_id=rp-my"));
+    }
+
+    public void testBuildingAuthenticationRequestWithLoginHint() {
+        final Settings.Builder settingsBuilder = Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code");
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null,
+            null);
+        final String state = new State().getValue();
+        final String nonce = new Nonce().getValue();
+        final String thehint = randomAlphaOfLength(8);
+        final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(state, nonce, thehint);
+
+        assertThat(response.getAuthenticationRequestUrl(), equalTo("https://op.example.com/login?login_hint=" + thehint +
+            "&scope=openid&response_type=code&redirect_uri=https%3A%2F%2Frp.my.com%2Fcb&state=" +
+            state + "&nonce=" + nonce + "&client_id=rp-my"));
+    }
+
+    private AuthenticationResult authenticateWithOidc(UserRoleMapper roleMapper, boolean notPopulateMetadata, boolean useAuthorizingRealm)
+        throws Exception {
+
+        final String principal = "324235435454";
+        final MockLookupRealm lookupRealm = new MockLookupRealm(
+            new RealmConfig(new RealmConfig.RealmIdentifier("mock", "mock_lookup"), globalSettings, env, threadContext));
+        final OpenIdConnectAuthenticator authenticator = mock(OpenIdConnectAuthenticator.class);
+
+        final Settings.Builder builder = getBasicRealmSettings();
+        if (notPopulateMetadata) {
+            builder.put(getFullSettingKey(REALM_NAME, SamlRealmSettings.POPULATE_USER_METADATA),
+                false);
+        }
+        if (useAuthorizingRealm) {
+            builder.putList(getFullSettingKey(new RealmConfig.RealmIdentifier("oidc", REALM_NAME),
+                DelegatedAuthorizationSettings.AUTHZ_REALMS), lookupRealm.name());
+            lookupRealm.registerUser(new User(principal, new String[]{"lookup_user_role"}, "Clinton Barton", "cbarton@shield.gov",
+                Collections.singletonMap("is_lookup", true), true));
+        }
+        final RealmConfig config = buildConfig(builder.build(), threadContext);
+        final OpenIdConnectRealm realm = new OpenIdConnectRealm(config, authenticator, roleMapper);
+        initializeRealms(realm, lookupRealm);
+        final OpenIdConnectToken token = new OpenIdConnectToken("", new State(), new Nonce());
+        final JWTClaimsSet claims = new JWTClaimsSet.Builder()
+            .subject(principal)
+            .audience("https://rp.elastic.co/cb")
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issueTime(Date.from(now().minusSeconds(5)))
+            .jwtID(randomAlphaOfLength(8))
+            .issuer("https://op.company.org")
+            .claim("groups", Arrays.asList("group1", "group2", "groups3"))
+            .claim("mail", "cbarton@shield.gov")
+            .claim("name", "Clinton Barton")
+            .claim("id_token_hint", "thisis.aserialized.jwt")
+            .build();
+
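+        // Stub the mocked authenticator so that authenticating any OIDC token immediately succeeds with the claims above.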
+        doAnswer((i) -> {
+            ActionListener<JWTClaimsSet> listener = (ActionListener<JWTClaimsSet>) i.getArguments()[1];
+            listener.onResponse(claims);
+            return null;
+        }).when(authenticator).authenticate(any(OpenIdConnectToken.class), any(ActionListener.class));
+
+        final PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
+        realm.authenticate(token, future);
+        final AuthenticationResult result = future.get();
+        assertThat(result, notNullValue());
+        assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS));
+        assertThat(result.getUser().principal(), equalTo(principal));
+        assertThat(result.getUser().email(), equalTo("cbarton@shield.gov"));
+        assertThat(result.getUser().fullName(), equalTo("Clinton Barton"));
+
+        return result;
+    }
+
+    private void initializeRealms(Realm... realms) {
+        XPackLicenseState licenseState = mock(XPackLicenseState.class);
+        when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true);
+
+        final List<Realm> realmList = Arrays.asList(realms);
+        for (Realm realm : realms) {
+            realm.initialize(realmList, licenseState);
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java
new file mode 100644
index 00000000000..df5acb0c3a7
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import com.nimbusds.jose.JWSAlgorithm;
+import com.nimbusds.jose.JWSHeader;
+import com.nimbusds.jose.crypto.RSASSASigner;
+import com.nimbusds.jwt.JWT;
+import com.nimbusds.jwt.JWTClaimsSet;
+import com.nimbusds.jwt.SignedJWT;
+import com.nimbusds.openid.connect.sdk.Nonce;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.util.Arrays;
+import java.util.Date;
+
+import static java.time.Instant.now;
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
+
+public abstract class OpenIdConnectTestCase extends ESTestCase {
+
+    protected static final String REALM_NAME = "oidc-realm";
+
+    protected static Settings.Builder getBasicRealmSettings() {
+        return Settings.builder()
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.org/login")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.org/token")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ENDSESSION_ENDPOINT), "https://op.example.org/logout")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.org/jwks.json")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.elastic.co/cb")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_POST_LOGOUT_REDIRECT_URI), "https://rp.elastic.co/succ_logout")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), randomFrom("code", "id_token"))
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.GROUPS_CLAIM.getClaim()), "groups")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.MAIL_CLAIM.getClaim()), "mail")
+            .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name");
+    }
+
+    protected JWT generateIdToken(String subject, String audience, String issuer) throws Exception {
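+        // Build an ID token and sign it with a freshly generated, throwaway RSA key pair.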
+        int hashSize = randomFrom(256, 384, 512);
+        int keySize = randomFrom(2048, 4096);
+        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
+        gen.initialize(keySize);
+        KeyPair keyPair = gen.generateKeyPair();
+        JWTClaimsSet idTokenClaims = new JWTClaimsSet.Builder()
+            .jwtID(randomAlphaOfLength(8))
+            .audience(audience)
+            .expirationTime(Date.from(now().plusSeconds(3600)))
+            .issuer(issuer)
+            .issueTime(Date.from(now().minusSeconds(4)))
+            .notBeforeTime(Date.from(now().minusSeconds(4)))
+            .claim("nonce", new Nonce())
+            .subject(subject)
+            .build();
+
+        SignedJWT jwt = new SignedJWT(
+            new JWSHeader.Builder(JWSAlgorithm.parse("RS" + hashSize)).build(),
+            idTokenClaims);
+        jwt.sign(new RSASSASigner(keyPair.getPrivate()));
+        return jwt;
+    }
+
+    protected RealmConfig buildConfig(Settings realmSettings, ThreadContext threadContext) {
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir())
+            .put(realmSettings).build();
+        final Environment env = TestEnvironment.newEnvironment(settings);
+        return new RealmConfig(new RealmConfig.RealmIdentifier("oidc", REALM_NAME), settings, env, threadContext);
+    }
+
+    public static void writeJwkSetToFile(Path file) throws IOException {
+        Files.write(file, Arrays.asList(
+            "{\n" +
+                "  \"keys\": [\n" +
+                "    {\n" +
+                "      \"kty\": \"RSA\",\n" +
+                "      \"d\": \"lT2V49RNsu0eTroQDqFCiHY-CkPWdKfKAf66sJrWPNpSX8URa6pTCruFQMsb9ZSqQ8eIvqys9I9rq6Wpaxn1aGRahVzxp7nsBPZYw" +
+                "SY09LRzhvAxJwWdwtF-ogrV5-p99W9mhEa0khot3myzzfWNnGzcf1IudqvkqE9zrlUJg-kvA3icbs6HgaZVAevb_mx-bgbtJdnUxyPGwXLyQ7g6hlntQ" +
+                "R_vpzTnK7XFU6fvkrojh7UPJkanKAH0gf3qPrB-Y2gQML7RSlKo-ZfJNHa83G4NRLHKuWTI6dSKJlqmS9zWGmyC3dx5kGjgqD6YgwtWlip8q-U839zxt" +
+                "z25yeslsQ\",\n" +
+                "      \"e\": \"AQAB\",\n" +
+                "      \"use\": \"sig\",\n" +
+                "      \"kid\": \"testkey\",\n" +
+                "      \"alg\": \"RS256\",\n" +
+                "      \"n\": \"lXBe4UngWJiUfbqbeOvwbH04kYLCpeH4k0o3ngScZDo6ydc_gBDEVwPLQpi8D930aIzr3XHP3RCj0hnpxUun7MNMhWxJZVOd1eg5u" +
+                "uO-nPIhkqr9iGKV5srJk0Dvw0wBaGZuXMBheY2ViNaKTR9EEtjNwU2d2-I5U3YlrnFR6nj-Pn_hWaiCbb_pSFM4w9QpoLDmuwMRanHY_YK7Td2WMICSG" +
+                "P3IRGmbecRZCqgkWVZk396EMoMLNxi8WcErYknyY9r-QeJMruRkr27kgx78L7KZ9uBmu9oKXRQl15ZDYe7Bnt9E5wSdOCV9R9h5VRVUur-_129XkDeAX" +
+                "-6re63_Mw\"\n" +
+                "    }\n" +
+                "  ]\n" +
+                "}"
+        ));
+    }
+}
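
Side note on the generateIdToken helper above: the following is a minimal, self-contained sketch (for illustration only; it assumes nimbus-jose-jwt on the classpath, as these tests already do, and uses made-up subject/issuer/audience values) of signing an ID token the same way and then verifying it against the matching RSA public key, which is roughly what a realm does with the OP's published JWKS.

    import com.nimbusds.jose.JWSAlgorithm;
    import com.nimbusds.jose.JWSHeader;
    import com.nimbusds.jose.crypto.RSASSASigner;
    import com.nimbusds.jose.crypto.RSASSAVerifier;
    import com.nimbusds.jwt.JWTClaimsSet;
    import com.nimbusds.jwt.SignedJWT;

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.interfaces.RSAPublicKey;

    public class IdTokenSignAndVerify {
        public static void main(String[] args) throws Exception {
            // Generate a throwaway RSA key pair, as generateIdToken does.
            KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
            gen.initialize(2048);
            KeyPair keyPair = gen.generateKeyPair();

            // Sign a minimal claims set with the private key.
            JWTClaimsSet claims = new JWTClaimsSet.Builder()
                .subject("alice")
                .issuer("https://op.example.org")
                .audience("rp-my")
                .build();
            SignedJWT idToken = new SignedJWT(new JWSHeader.Builder(JWSAlgorithm.RS256).build(), claims);
            idToken.sign(new RSASSASigner(keyPair.getPrivate()));

            // Verify the signature with the matching public key.
            boolean valid = idToken.verify(new RSASSAVerifier((RSAPublicKey) keyPair.getPublic()));
            System.out.println("signature valid: " + valid + ", sub: " + idToken.getJWTClaimsSet().getSubject());
        }
    }
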
diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle
new file mode 100644
index 00000000000..72fd21c9932
--- /dev/null
+++ b/x-pack/qa/oidc-op-tests/build.gradle
@@ -0,0 +1,84 @@
+Project idpFixtureProject = xpackProject("test:idp-fixture")
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+apply plugin: 'elasticsearch.test.fixtures'
+
+dependencies {
+    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
+    testCompile project(path: xpackModule('core'), configuration: 'default')
+    testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
+    testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
+}
+testFixtures.useFixture ":x-pack:test:idp-fixture"
+
+String ephemeralPort;
+task setupPorts {
+    // Don't attempt to get ephemeral ports when Docker is not available
+    onlyIf { idpFixtureProject.postProcessFixture.enabled }
+    dependsOn idpFixtureProject.postProcessFixture
+    doLast {
+        ephemeralPort = idpFixtureProject.postProcessFixture.ext."test.fixtures.oidc-provider.tcp.8080"
+    }
+}
+
+integTestCluster {
+    dependsOn setupPorts
+    setting 'xpack.license.self_generated.type', 'trial'
+    setting 'xpack.security.enabled', 'true'
+    setting 'xpack.security.http.ssl.enabled', 'false'
+    setting 'xpack.security.authc.token.enabled', 'true'
+    setting 'xpack.security.authc.realms.file.file.order', '0'
+    setting 'xpack.security.authc.realms.native.native.order', '1'
+    // OpenID Connect Realm 1 configured for authorization grant flow
+    setting 'xpack.security.authc.realms.oidc.c2id.order', '2'
+    setting 'xpack.security.authc.realms.oidc.c2id.op.name', 'c2id-op'
+    setting 'xpack.security.authc.realms.oidc.c2id.op.issuer', 'http://localhost:8080'
+    setting 'xpack.security.authc.realms.oidc.c2id.op.authorization_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id-login"
+    setting 'xpack.security.authc.realms.oidc.c2id.op.token_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id/token"
+    setting 'xpack.security.authc.realms.oidc.c2id.op.userinfo_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id/userinfo"
+    setting 'xpack.security.authc.realms.oidc.c2id.op.jwkset_path', 'op-jwks.json'
+    setting 'xpack.security.authc.realms.oidc.c2id.rp.redirect_uri', 'https://my.fantastic.rp/cb'
+    setting 'xpack.security.authc.realms.oidc.c2id.rp.client_id', 'elasticsearch-rp'
+    keystoreSetting 'xpack.security.authc.realms.oidc.c2id.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2'
+    setting 'xpack.security.authc.realms.oidc.c2id.rp.response_type', 'code'
+    setting 'xpack.security.authc.realms.oidc.c2id.claims.principal', 'sub'
+    setting 'xpack.security.authc.realms.oidc.c2id.claims.name', 'name'
+    setting 'xpack.security.authc.realms.oidc.c2id.claims.mail', 'email'
+    setting 'xpack.security.authc.realms.oidc.c2id.claims.groups', 'groups'
+    // OpenID Connect Realm 2 configured for implicit flow
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.order', '3'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.name', 'c2id-implicit'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.issuer', 'http://localhost:8080'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.authorization_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id-login"
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.token_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id/token"
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.userinfo_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id/userinfo"
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.jwkset_path', 'op-jwks.json'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.rp.redirect_uri', 'https://my.fantastic.rp/cb'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.rp.client_id', 'elasticsearch-rp'
+    keystoreSetting 'xpack.security.authc.realms.oidc.c2id-implicit.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.rp.response_type', 'id_token token'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.claims.principal', 'sub'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.claims.name', 'name'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.claims.mail', 'email'
+    setting 'xpack.security.authc.realms.oidc.c2id-implicit.claims.groups', 'groups'
+    setting 'xpack.ml.enabled', 'false'
+    
+    extraConfigFile 'op-jwks.json', idpFixtureProject.file("oidc/op-jwks.json")
+
+    setupCommand 'setupTestAdmin',
+            'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser"
+
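+    // Wait for the cluster to reach at least yellow health before running the tests, authenticating as the test_admin user created above.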
+    waitCondition = { node, ant ->
+        File tmpFile = new File(node.cwd, 'wait.success')
+        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
+                dest: tmpFile.toString(),
+                username: 'test_admin',
+                password: 'x-pack-test-password',
+                ignoreerrors: true,
+                retries: 10)
+        return tmpFile.exists()
+    }
+}
+
+thirdPartyAudit.enabled = false
\ No newline at end of file
diff --git a/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java b/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java
new file mode 100644
index 00000000000..7835b236ed8
--- /dev/null
+++ b/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java
@@ -0,0 +1,394 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.authc.oidc;
+
+import net.minidev.json.JSONObject;
+import net.minidev.json.parser.JSONParser;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.StatusLine;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.protocol.BasicHttpContext;
+import org.apache.http.protocol.HttpContext;
+import org.apache.http.util.EntityUtils;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xpack.core.common.socket.SocketAccess;
+import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap;
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class OpenIdConnectAuthIT extends ESRestTestCase {
+
+    private static final String REALM_NAME = "c2id";
+    private static final String REALM_NAME_IMPLICIT = "c2id-implicit";
+    private static final String FACILITATOR_PASSWORD = "f@cilit@t0r";
+    private static final String REGISTRATION_URL = "http://127.0.0.1:" + getEphemeralPortFromProperty("8080") + "/c2id/clients";
+    private static final String LOGIN_API = "http://127.0.0.1:" + getEphemeralPortFromProperty("8080") + "/c2id-login/api/";
+
+    @Before
+    public void setupUserAndRoles() throws IOException {
+        setFacilitatorUser();
+        setRoleMappings();
+    }
+
+    /**
+     * C2id server only supports dynamic registration, so we can't pre-seed its config with our client data. Executed only once.
+     */
+    @BeforeClass
+    public static void registerClient() throws Exception {
+        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
+            HttpPost httpPost = new HttpPost(REGISTRATION_URL);
+            final BasicHttpContext context = new BasicHttpContext();
+            String json = "{" +
+                "\"grant_types\": [\"implicit\", \"authorization_code\"]," +
+                "\"response_types\": [\"code\", \"token id_token\"]," +
+                "\"preferred_client_id\":\"elasticsearch-rp\"," +
+                "\"preferred_client_secret\":\"b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2\"," +
+                "\"redirect_uris\": [\"https://my.fantastic.rp/cb\"]" +
+                "}";
+            httpPost.setEntity(new StringEntity(json, ContentType.APPLICATION_JSON));
+            httpPost.setHeader("Accept", "application/json");
+            httpPost.setHeader("Content-type", "application/json");
+            httpPost.setHeader("Authorization", "Bearer 811fa888f3e0fdc9e01d4201bfeee46a");
+            CloseableHttpResponse response = SocketAccess.doPrivileged(() -> httpClient.execute(httpPost, context));
+            assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+        }
+    }
+
+    @Override
+    protected Settings restAdminSettings() {
+        String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray()));
+        return Settings.builder()
+            .put(ThreadContext.PREFIX + ".Authorization", token)
+            .build();
+    }
+
+    private String authenticateAtOP(URI opAuthUri) throws Exception {
+        // C2ID doesn't have a non-JS login page :/, so use their API directly
+        // see https://connect2id.com/products/server/docs/guides/login-page
+        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
+            final BasicHttpContext context = new BasicHttpContext();
+            // Initiate the authentication process
+            HttpPost httpPost = new HttpPost(LOGIN_API + "initAuthRequest");
+            String initJson = "{" +
+                "  \"qs\":\"" + opAuthUri.getRawQuery() + "\"" +
+                "}";
+            configureJsonRequest(httpPost, initJson);
+            JSONObject initResponse = execute(httpClient, httpPost, context, response -> {
+                assertHttpOk(response.getStatusLine());
+                return parseJsonResponse(response);
+            });
+            assertThat(initResponse.getAsString("type"), equalTo("auth"));
+            final String sid = initResponse.getAsString("sid");
+            // Actually authenticate the user with ldapAuth
+            HttpPost loginHttpPost = new HttpPost(LOGIN_API + "authenticateSubject?cacheBuster=" + randomAlphaOfLength(8));
+            String loginJson = "{" +
+                "\"username\":\"alice\"," +
+                "\"password\":\"secret\"" +
+                "}";
+            configureJsonRequest(loginHttpPost, loginJson);
+            JSONObject loginJsonResponse = execute(httpClient, loginHttpPost, context, response -> {
+                assertHttpOk(response.getStatusLine());
+                return parseJsonResponse(response);
+            });
+            // Get the consent screen
+            HttpPut consentFetchHttpPut =
+                new HttpPut(LOGIN_API + "updateAuthRequest" + "/" + sid + "?cacheBuster=" + randomAlphaOfLength(8));
+            String consentFetchJson = "{" +
+                "\"sub\": \"" + loginJsonResponse.getAsString("id") + "\"," +
+                "\"acr\": \"http://loa.c2id.com/basic\"," +
+                "\"amr\": [\"pwd\"]," +
+                "\"data\": {" +
+                "\"email\": \"" + loginJsonResponse.getAsString("email") + "\"," +
+                "\"name\": \"" + loginJsonResponse.getAsString("name") + "\"" +
+                "}" +
+                "}";
+            configureJsonRequest(consentFetchHttpPut, consentFetchJson);
+            JSONObject consentFetchResponse = execute(httpClient, consentFetchHttpPut, context, response -> {
+                assertHttpOk(response.getStatusLine());
+                return parseJsonResponse(response);
+            });
+            if (consentFetchResponse.getAsString("type").equals("consent")) {
+                // If needed, submit the consent
+                HttpPut consentHttpPut =
+                    new HttpPut(LOGIN_API + "updateAuthRequest" + "/" + sid + "?cacheBuster=" + randomAlphaOfLength(8));
+                String consentJson = "{" +
+                    "\"claims\":[\"name\", \"email\"]," +
+                    "\"scope\":[\"openid\"]" +
+                    "}";
+                configureJsonRequest(consentHttpPut, consentJson);
+                JSONObject jsonConsentResponse = execute(httpClient, consentHttpPut, context, response -> {
+                    assertHttpOk(response.getStatusLine());
+                    return parseJsonResponse(response);
+                });
+                assertThat(jsonConsentResponse.getAsString("type"), equalTo("response"));
+                JSONObject parameters = (JSONObject) jsonConsentResponse.get("parameters");
+                return parameters.getAsString("uri");
+            } else if (consentFetchResponse.getAsString("type").equals("response")) {
+                JSONObject parameters = (JSONObject) consentFetchResponse.get("parameters");
+                return parameters.getAsString("uri");
+            } else {
+                fail("Received an invalid response from the OP");
+                return null;
+            }
+        }
+    }
+
+    private static String getEphemeralPortFromProperty(String port) {
+        String key = "test.fixtures.oidc-provider.tcp." + port;
+        final String value = System.getProperty(key);
+        assertNotNull("Expected the actual value for port " + port + " to be in system property " + key, value);
+        return value;
+    }
+
+    private Map<String, Object> callAuthenticateApiUsingAccessToken(String accessToken) throws IOException {
+        Request request = new Request("GET", "/_security/_authenticate");
+        RequestOptions.Builder options = request.getOptions().toBuilder();
+        options.addHeader("Authorization", "Bearer " + accessToken);
+        request.setOptions(options);
+        return entityAsMap(client().performRequest(request));
+    }
+
+    private <T> T execute(CloseableHttpClient client, HttpEntityEnclosingRequestBase request,
+                          HttpContext context, CheckedFunction<HttpResponse, T, Exception> body)
+        throws Exception {
+        final int timeout = (int) TimeValue.timeValueSeconds(90).millis();
+        RequestConfig requestConfig = RequestConfig.custom()
+            .setConnectionRequestTimeout(timeout)
+            .setConnectTimeout(timeout)
+            .setSocketTimeout(timeout)
+            .build();
+        request.setConfig(requestConfig);
+        logger.info("Execute HTTP " + request.getMethod() + " " + request.getURI() +
+            " with payload " + EntityUtils.toString(request.getEntity()));
+        try (CloseableHttpResponse response = SocketAccess.doPrivileged(() -> client.execute(request, context))) {
+            return body.apply(response);
+        } catch (Exception e) {
+            logger.warn(new ParameterizedMessage("HTTP Request [{}] failed", request.getURI()), e);
+            throw e;
+        }
+    }
+
+    private JSONObject parseJsonResponse(HttpResponse response) throws Exception {
+        JSONParser parser = new JSONParser(JSONParser.DEFAULT_PERMISSIVE_MODE);
+        String entity = EntityUtils.toString(response.getEntity());
+        logger.info("Response entity as string: " + entity);
+        return (JSONObject) parser.parse(entity);
+    }
+
+    private void configureJsonRequest(HttpEntityEnclosingRequestBase request, String jsonBody) {
+        StringEntity entity = new StringEntity(jsonBody, ContentType.APPLICATION_JSON);
+        request.setEntity(entity);
+        request.setHeader("Accept", "application/json");
+        request.setHeader("Content-type", "application/json");
+    }
+
+    public void testAuthenticateWithCodeFlow() throws Exception {
+        final PrepareAuthResponse prepareAuthResponse = getRedirectedFromFacilitator(REALM_NAME);
+        final String redirectUri = authenticateAtOP(prepareAuthResponse.getAuthUri());
+        Tuple<String, String> tokens = completeAuthentication(redirectUri, prepareAuthResponse.getState(),
+            prepareAuthResponse.getNonce());
+        verifyElasticsearchAccessTokenForCodeFlow(tokens.v1());
+    }
+
+    public void testAuthenticateWithImplicitFlow() throws Exception {
+        final PrepareAuthResponse prepareAuthResponse = getRedirectedFromFacilitator(REALM_NAME_IMPLICIT);
+        final String redirectUri = authenticateAtOP(prepareAuthResponse.getAuthUri());
+        Tuple<String, String> tokens = completeAuthentication(redirectUri, prepareAuthResponse.getState(),
+            prepareAuthResponse.getNonce());
+        verifyElasticsearchAccessTokenForImplicitFlow(tokens.v1());
+    }
+
+    private void verifyElasticsearchAccessTokenForCodeFlow(String accessToken) throws IOException {
+        final Map<String, Object> map = callAuthenticateApiUsingAccessToken(accessToken);
+        logger.info("Authentication with token Response: " + map);
+        assertThat(map.get("username"), equalTo("alice"));
+        assertThat((List<?>) map.get("roles"), containsInAnyOrder("kibana_user", "auditor"));
+
+        assertThat(map.get("metadata"), instanceOf(Map.class));
+        final Map<?, ?> metadata = (Map<?, ?>) map.get("metadata");
+        assertThat(metadata.get("oidc(sub)"), equalTo("alice"));
+        assertThat(metadata.get("oidc(iss)"), equalTo("http://localhost:8080"));
+    }
+
+    private void verifyElasticsearchAccessTokenForImplicitFlow(String accessToken) throws IOException {
+        final Map<String, Object> map = callAuthenticateApiUsingAccessToken(accessToken);
+        logger.info("Authentication with token Response: " + map);
+        assertThat(map.get("username"), equalTo("alice"));
+        assertThat((List<?>) map.get("roles"), containsInAnyOrder("limited_user", "auditor"));
+
+        assertThat(map.get("metadata"), instanceOf(Map.class));
+        final Map<?, ?> metadata = (Map<?, ?>) map.get("metadata");
+        assertThat(metadata.get("oidc(sub)"), equalTo("alice"));
+        assertThat(metadata.get("oidc(iss)"), equalTo("http://localhost:8080"));
+    }
+
+
+    private PrepareAuthResponse getRedirectedFromFacilitator(String realmName) throws Exception {
+        final Map<String, String> body = Collections.singletonMap("realm", realmName);
+        Request request = buildRequest("POST", "/_security/oidc/prepare", body, facilitatorAuth());
+        final Response prepare = client().performRequest(request);
+        assertOK(prepare);
+        final Map<String, Object> responseBody = parseResponseAsMap(prepare.getEntity());
+        logger.info("Created OpenIDConnect authentication request {}", responseBody);
+        final String state = (String) responseBody.get("state");
+        final String nonce = (String) responseBody.get("nonce");
+        final String authUri = (String) responseBody.get("redirect");
+        return new PrepareAuthResponse(new URI(authUri), state, nonce);
+    }
+
+    private Tuple<String, String> completeAuthentication(String redirectUri, String state, String nonce) throws Exception {
+        final Map<String, String> body = new HashMap<>();
+        body.put("redirect_uri", redirectUri);
+        body.put("state", state);
+        body.put("nonce", nonce);
+        Request request = buildRequest("POST", "/_security/oidc/authenticate", body, facilitatorAuth());
+        final Response authenticate = client().performRequest(request);
+        assertOK(authenticate);
+        final Map<String, Object> responseBody = parseResponseAsMap(authenticate.getEntity());
+        logger.info(" OpenIDConnect authentication response {}", responseBody);
+        assertNotNull(responseBody.get("access_token"));
+        assertNotNull(responseBody.get("refresh_token"));
+        return new Tuple<>((String) responseBody.get("access_token"), (String) responseBody.get("refresh_token"));
+    }
+
+    private Request buildRequest(String method, String endpoint, Map<String, ?> body, Header... headers) throws IOException {
+        Request request = new Request(method, endpoint);
+        if (body != null) {
+            XContentBuilder builder = XContentFactory.jsonBuilder().map(body);
+            request.setJsonEntity(BytesReference.bytes(builder).utf8ToString());
+        }
+        final RequestOptions.Builder options = request.getOptions().toBuilder();
+        for (Header header : headers) {
+            options.addHeader(header.getName(), header.getValue());
+        }
+        request.setOptions(options);
+        return request;
+    }
+
+    private static BasicHeader facilitatorAuth() {
+        final String auth =
+            UsernamePasswordToken.basicAuthHeaderValue("facilitator", new SecureString(FACILITATOR_PASSWORD.toCharArray()));
+        return new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, auth);
+    }
+
+    private Map<String, Object> parseResponseAsMap(HttpEntity entity) throws IOException {
+        return convertToMap(XContentType.JSON.xContent(), entity.getContent(), false);
+    }
+
+
+    private void assertHttpOk(StatusLine status) {
+        assertThat("Unexpected HTTP Response status: " + status, status.getStatusCode(), Matchers.equalTo(200));
+    }
+
+    /**
+     * We create a user named `facilitator` with the privileges it needs (`manage_oidc`, `manage_token`). A facilitator web app
+     * would also need such a user in order to call the OIDC related APIs on behalf of the end user.
+     */
+    private void setFacilitatorUser() throws IOException {
+        Request createRoleRequest = new Request("PUT", "/_security/role/facilitator");
+        createRoleRequest.setJsonEntity("{ \"cluster\" : [\"manage_oidc\", \"manage_token\"] }");
+        adminClient().performRequest(createRoleRequest);
+        Request createUserRequest = new Request("PUT", "/_security/user/facilitator");
+        createUserRequest.setJsonEntity("{ \"password\" : \"" + FACILITATOR_PASSWORD + "\", \"roles\" : [\"facilitator\"] }");
+        adminClient().performRequest(createUserRequest);
+    }
+
+    private void setRoleMappings() throws IOException {
+        Request createRoleMappingRequest = new Request("PUT", "/_security/role_mapping/oidc_kibana");
+        createRoleMappingRequest.setJsonEntity("{ \"roles\" : [\"kibana_user\"]," +
+            "\"enabled\": true," +
+            "\"rules\": {" +
+            "\"field\": { \"realm.name\": \"" + REALM_NAME + "\"}" +
+            "}" +
+            "}");
+        adminClient().performRequest(createRoleMappingRequest);
+
+        createRoleMappingRequest = new Request("PUT", "/_security/role_mapping/oidc_limited");
+        createRoleMappingRequest.setJsonEntity("{ \"roles\" : [\"limited_user\"]," +
+            "\"enabled\": true," +
+            "\"rules\": {" +
+            "\"field\": { \"realm.name\": \"" + REALM_NAME_IMPLICIT + "\"}" +
+            "}" +
+            "}");
+        adminClient().performRequest(createRoleMappingRequest);
+
+        createRoleMappingRequest = new Request("PUT", "/_security/role_mapping/oidc_auditor");
+        createRoleMappingRequest.setJsonEntity("{ \"roles\" : [\"auditor\"]," +
+            "\"enabled\": true," +
+            "\"rules\": {" +
+            "\"field\": { \"groups\": \"audit\"}" +
+            "}" +
+            "}");
+        adminClient().performRequest(createRoleMappingRequest);
+    }
+
+
+    /**
+     * Simple POJO encapsulating a response to calling /_security/oidc/prepare
+     */
+    class PrepareAuthResponse {
+        private URI authUri;
+        private String state;
+        private String nonce;
+
+        PrepareAuthResponse(URI authUri, String state, String nonce) {
+            this.authUri = authUri;
+            this.state = state;
+            this.nonce = nonce;
+        }
+
+        URI getAuthUri() {
+            return authUri;
+        }
+
+        String getState() {
+            return state;
+        }
+
+        String getNonce() {
+            return nonce;
+        }
+    }
+}
diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml
index 53fb6285516..c549fbbfa5d 100644
--- a/x-pack/test/idp-fixture/docker-compose.yml
+++ b/x-pack/test/idp-fixture/docker-compose.yml
@@ -38,3 +38,10 @@ services:
       - ./idp/shibboleth-idp/conf:/opt/shibboleth-idp/conf
       - ./idp/shibboleth-idp/credentials:/opt/shibboleth-idp/credentials
       - ./idp/shib-jetty-base/start.d/ssl.ini:/opt/shib-jetty-base/start.d/ssl.ini
+
+  oidc-provider:
+    image: "c2id/c2id-server:7.8"
+    ports:
+      - "8080"
+    volumes:
+      - ./oidc/override.properties:/etc/c2id/override.properties
\ No newline at end of file
diff --git a/x-pack/test/idp-fixture/oidc/op-jwks.json b/x-pack/test/idp-fixture/oidc/op-jwks.json
new file mode 100644
index 00000000000..7a26fb7714c
--- /dev/null
+++ b/x-pack/test/idp-fixture/oidc/op-jwks.json
@@ -0,0 +1 @@
+{"keys":[{"kty":"RSA","e":"AQAB","use":"sig","kid":"CXup","n":"hrwD-lc-IwzwidCANmy4qsiZk11yp9kHykOuP0yOnwi36VomYTQVEzZXgh2sDJpGgAutdQudgwLoV8tVSsTG9SQHgJjH9Pd_9V4Ab6PANyZNG6DSeiq1QfiFlEP6Obt0JbRB3W7X2vkxOVaNoWrYskZodxU2V0ogeVL_LkcCGAyNu2jdx3j0DjJatNVk7ystNxb9RfHhJGgpiIkO5S3QiSIVhbBKaJHcZHPF1vq9g0JMGuUCI-OTSVg6XBkTLEGw1C_R73WD_oVEBfdXbXnLukoLHBS11p3OxU7f4rfxA_f_72_UwmWGJnsqS3iahbms3FkvqoL9x_Vj3GhuJSf97Q"},{"kty":"EC","use":"sig","crv":"P-256","kid":"yGvt","x":"pvgdqM3RCshljmuCF1D2Ez1w5ei5k7-bpimWLPNeEHI","y":"JSmUhbUTqiFclVLEdw6dz038F7Whw4URobjXbAReDuM"},{"kty":"EC","use":"sig","crv":"P-384","kid":"9nHY","x":"JPKhjhE0Bj579Mgj3Cn3ERGA8fKVYoGOaV9BPKhtnEobphf8w4GSeigMesL-038W","y":"UbJa1QRX7fo9LxSlh7FOH5ABT5lEtiQeQUcX9BW0bpJFlEVGqwec80tYLdOIl59M"},{"kty":"EC","use":"sig","crv":"P-521","kid":"tVzS","x":"AZgkRHlIyNQJlPIwTWdHqouw41k9dS3GJO04BDEnJnd_Dd1owlCn9SMXA-JuXINn4slwbG4wcECbctXb2cvdGtmn","y":"AdBC6N9lpupzfzcIY3JLIuc8y8MnzV-ItmzHQcC5lYWMTbuM9NU_FlvINeVo8g6i4YZms2xFB-B0VVdaoF9kUswC"}]}
\ No newline at end of file
diff --git a/x-pack/test/idp-fixture/oidc/override.properties b/x-pack/test/idp-fixture/oidc/override.properties
new file mode 100644
index 00000000000..888bde9acb4
--- /dev/null
+++ b/x-pack/test/idp-fixture/oidc/override.properties
@@ -0,0 +1,4 @@
+op.issuer=http://localhost:8080
+op.authz.endpoint=http://localhost:8080/c2id-login/
+op.reg.apiAccessTokenSHA256=d1c4fa70d9ee708d13cfa01daa0e060a05a2075a53c5cc1ad79e460e96ab5363
+jose.jwkSer=RnVsbCBrZXk6CnsKICAia2V5cyI6IFsKICAgIHsKICAgICAgInAiOiAiLXhhN2d2aW5tY3N3QXU3Vm1mV2loZ2o3U3gzUzhmd2dFSTdMZEVveW5FU1RzcElaeUY5aHc0NVhQZmI5VHlpbzZsOHZTS0F5RmU4T2lOalpkNE1Ra0ttYlJzTmxxR1Y5VlBoWF84UG1JSm5mcGVhb3E5YnZfU0k1blZHUl9zYUUzZE9sTEE2VWpaS0lsRVBNb0ZuRlZCMUFaUU9qQlhRRzZPTDg2eDZ2NHMwIiwKICAgICAgImt0eSI6ICJSU0EiLAogICAgICAicSI6ICJ2Q3pDQUlpdHV0MGx1V0djQloyLUFabURLc1RxNkkxcUp0RmlEYkIyZFBNQVlBNldOWTdaWEZoVWxsSjJrT2ZELWdlYjlkYkN2ODBxNEwyajVZSjZoOTBUc1NRWWVHRlljN1lZMGdCMU5VR3l5cXctb29QN0EtYlJmMGI3b3I4ajZJb0hzQTZKa2JranN6c3otbkJ2U2RmUURlZkRNSVc3Ni1ZWjN0c2hsY2MiLAogICAgICAiZCI6ICJtbFBOcm1zVVM5UmJtX1I5SElyeHdmeFYzZnJ2QzlaQktFZzRzc1ZZaThfY09lSjV2U1hyQV9laEtwa2g4QVhYaUdWUGpQbVlyd29xQzFVUksxUkZmLVg0dG10emV2OUVHaU12Z0JCaEF5RkdTSUd0VUNla2x4Q2dhb3BpMXdZSU1Bd0M0STZwMUtaZURxTVNCWVZGeHA5ZWlJZ2pwb05JbV9lR3hXUUs5VHNnYmk5T3lyc1VqaE9KLVczN2JVMEJWUU56UXpxODhCcGxmNzM3VmV1dy1FeDZaMk1iWXR3SWdfZ0JVb0JEZ0NrZkhoOVE4MElYcEZRV0x1RzgwenFrdkVwTHZ0RWxLbDRvQ3BHVnBjcmFUOFNsOGpYc3FDT1k0dnVRT19LRVUzS2VPNUNJbHd4eEhJYXZjQTE5cHFpSWJ5cm1LbThxS0ZEWHluUFJMSGFNZ1EiLAogICAgICAiZSI6ICJBUUFCIiwKICAgICAgImtpZCI6ICJyc2EzODRfMjA0OCIsCiAgICAgICJxaSI6ICJzMldTamVrVDl3S2JPbk9neGNoaDJPY3VubzE2Y20wS281Z3hoUWJTdVMyMldfUjJBR2ZVdkRieGF0cTRLakQ3THo3X1k2TjdTUkwzUVpudVhoZ1djeXgyNGhrUGppQUZLNmlkYVZKQzJqQmgycEZTUDVTNXZxZ0lsME12eWY4NjlwdkN4S0NzaGRKMGdlRWhveE93VkRPYXJqdTl2Zm9IQV90LWJoRlZrUnciLAogICAgICAiZHAiOiAiQlJhQTFqYVRydG9mTHZBSUJBYW1OSEVhSm51RU9zTVJJMFRCZXFuR1BNUm0tY2RjSG1OUVo5WUtqb2JpdXlmbnhGZ0piVDlSeElBRG0ySkpoZEp5RTN4Y1dTSzhmSjBSM1Jick1aT1dwako0QmJTVzFtU1VtRnlKTGxib3puRFhZR2RaZ1hzS0o1UkFrRUNQZFBCY3YwZVlkbk9NYWhfZndfaFZoNjRuZ2tFIiwKICAgICAgImFsZyI6ICJSU0EzODQiLAogICAgICAiZHEiOiAiUFJoVERKVlR3cDNXaDZfWFZrTjIwMUlpTWhxcElrUDN1UTYyUlRlTDNrQ2ZXSkNqMkZPLTRxcVRIQk0tQjZJWUVPLXpoVWZyQnhiMzJ1djNjS2JDWGFZN3BJSFJxQlFEQWQ2WGhHYzlwc0xqNThXd3VGY2RncERJYUFpRjNyc3NUMjJ4UFVvYkJFTVdBalV3bFJrNEtNTjItMnpLQk5FR3lIcDIzOUpKdnpVIiwKICAgICAgIm4iOiAidUpDWDVDbEZpM0JnTXBvOWhRSVZ2SDh0Vi1jLTVFdG5OeUZxVm91R3NlNWwyUG92MWJGb0tsRllsU25YTzNWUE9KRWR3azNDdl9VT0UtQzlqZERYRHpvS3Z4RURaTVM1TDZWMFpIVEJoNndIOV9iN3JHSlBxLV9RdlNkejczSzZxbHpGaUtQamRvdTF6VlFYTmZfblBZbnRnQkdNRUtBc1pRNGp0cWJCdE5lV0h0MF9UM001cEktTV9KNGVlRWpCTW95TkZuU2ExTEZDVmZRNl9YVnpjelp1TlRGMlh6UmdRWkFmcmJGRXZ6eXR1TzVMZTNTTXFrUUFJeDhFQmkwYXVlRUNqNEQ4cDNVNXFVRG92NEF2VnRJbUZlbFJvb1pBMHJtVW1KRHJ4WExrVkhuVUpzaUF6ZW9TLTNBSnV1bHJkMGpuNjJ5VjZHV2dFWklZMVNlZVd3IgogICAgfQogIF0KfQo
\ No newline at end of file

From d30fec4914bcdab3a018106c22dc0c27679320e0 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi <jim.ferenczi@elastic.co>
Date: Mon, 15 Apr 2019 09:46:50 +0200
Subject: [PATCH 018/112] Full text queries should not always ignore unmapped
 fields (#41062)

Full text queries ignore unmapped fields, even when all fields in the query are
unmapped (https://github.com/elastic/elasticsearch/issues/41022).
This change makes sure that unmapped fields are ignored only when they are mixed
with mapped fields, and returns a MatchNoDocsQuery otherwise.

Closes #41022
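
To illustrate the intended behaviour, here is a rough sketch using plain Lucene types (field names and the reason string are made up; the real queries are built by the parsers changed below): when every requested field is unmapped the whole query rewrites to a MatchNoDocsQuery, while a mix of mapped and unmapped fields keeps the mapped clause and collapses only the unmapped part.

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.MatchNoDocsQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class UnmappedFieldQueryShapes {
        public static void main(String[] args) {
            // Every field in the query is unmapped: the whole query matches nothing.
            Query allUnmapped = new MatchNoDocsQuery("unmapped fields [unmapped, another_unmapped]");

            // Mapped and unmapped fields mixed under an AND operator: the mapped term
            // is kept and only the unmapped part collapses to a match-no-docs clause.
            Query mixed = new BooleanQuery.Builder()
                .add(new TermQuery(new Term("mapped_field", "first")), BooleanClause.Occur.MUST)
                .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
                .build();

            System.out.println(allUnmapped);
            System.out.println(mixed);
        }
    }
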
---
 .../common/lucene/search/Queries.java         |  8 ++++-
 .../index/search/MultiMatchQuery.java         |  8 ++++-
 .../query/QueryStringQueryBuilderTests.java   | 31 +++++++++++++++++--
 .../query/SimpleQueryStringBuilderTests.java  | 11 +++++++
 4 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
index dd4389c2d6b..56d1b5cedc3 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
@@ -36,6 +36,8 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.TypeFieldMapper;
 
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.regex.Pattern;
 
@@ -52,7 +54,11 @@ public class Queries {
 
 
     public static Query newUnmappedFieldQuery(String field) {
-        return Queries.newMatchNoDocsQuery("unmapped field [" + (field != null ? field : "null") + "]");
+        return newUnmappedFieldsQuery(Collections.singletonList(field));
+    }
+
+    public static Query newUnmappedFieldsQuery(Collection<String> fields) {
+        return Queries.newMatchNoDocsQuery("unmapped fields " + fields);
     }
 
     public static Query newLenientFieldQuery(String field, RuntimeException e) {
diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
index 667d3a3823d..b9943870df7 100644
--- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
+++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
@@ -59,6 +59,12 @@ public class MultiMatchQuery extends MatchQuery {
 
     public Query parse(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
                        Object value, String minimumShouldMatch) throws IOException {
+        boolean hasMappedField = fieldNames.keySet().stream()
+            .anyMatch(k -> context.fieldMapper(k) != null);
+        if (hasMappedField == false) {
+            // all query fields are unmapped
+            return Queries.newUnmappedFieldsQuery(fieldNames.keySet());
+        }
         final float tieBreaker = groupTieBreaker == null ? type.tieBreaker() : groupTieBreaker;
         final List<Query> queries;
         switch (type) {
@@ -91,7 +97,7 @@ public class MultiMatchQuery extends MatchQuery {
     }
 
     private List<Query> buildFieldQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
-                                          Object value, String minimumShouldMatch) throws IOException{
+                                          Object value, String minimumShouldMatch) throws IOException {
         List<Query> queries = new ArrayList<>();
         for (String fieldName : fieldNames.keySet()) {
             if (context.fieldMapper(fieldName) == null) {
diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
index 7181c1de1fb..9ca77ae3e1b 100644
--- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
@@ -1210,13 +1210,13 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
             .field("unmapped_field")
             .lenient(true)
             .toQuery(createShardContext());
-        assertEquals(new BooleanQuery.Builder().build(), query);
+        assertEquals(new MatchNoDocsQuery(), query);
 
         // Unmapped prefix field
         query = new QueryStringQueryBuilder("unmapped_field:hello")
             .lenient(true)
             .toQuery(createShardContext());
-        assertEquals(new BooleanQuery.Builder().build(), query);
+        assertEquals(new MatchNoDocsQuery(), query);
 
         // Unmapped fields
         query = new QueryStringQueryBuilder("hello")
@@ -1224,7 +1224,32 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
             .field("unmapped_field")
             .field("another_field")
             .toQuery(createShardContext());
-        assertEquals(new BooleanQuery.Builder().build(), query);
+        assertEquals(new MatchNoDocsQuery(), query);
+
+        // Multi block
+        query = new QueryStringQueryBuilder("first unmapped:second")
+            .field(STRING_FIELD_NAME)
+            .field("unmapped")
+            .field("another_unmapped")
+            .defaultOperator(Operator.AND)
+            .toQuery(createShardContext());
+        BooleanQuery expected = new BooleanQuery.Builder()
+            .add(new TermQuery(new Term(STRING_FIELD_NAME, "first")), BooleanClause.Occur.MUST)
+            .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
+            .build();
+        assertEquals(expected, query);
+
+        query = new SimpleQueryStringBuilder("first unknown:second")
+            .field("unmapped")
+            .field("another_unmapped")
+            .defaultOperator(Operator.AND)
+            .toQuery(createShardContext());
+        expected = new BooleanQuery.Builder()
+            .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
+            .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
+            .build();
+        assertEquals(expected, query);
+
     }
 
     public void testDefaultField() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
index 3242f343379..2bb289ddc11 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
@@ -717,6 +717,17 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
                 .add(new TermQuery(new Term(STRING_FIELD_NAME, "second")), BooleanClause.Occur.MUST)
                 .build();
         assertEquals(expected, query);
+        query = new SimpleQueryStringBuilder("first & second")
+            .field("unmapped")
+            .field("another_unmapped")
+            .defaultOperator(Operator.AND)
+            .toQuery(createShardContext());
+        expected = new BooleanQuery.Builder()
+            .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
+            .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
+            .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST)
+            .build();
+        assertEquals(expected, query);
     }
 
     public void testNegativeFieldBoost() {

From 414debd740421a7d5b8ef9ab3e571e0f60c6ea71 Mon Sep 17 00:00:00 2001
From: Guilherme Ferreira <guilhermeaferreira_t@yahoo.com.br>
Date: Mon, 15 Apr 2019 14:09:46 +0200
Subject: [PATCH 019/112] [Docs] Correct spelling of the "_none_" stopwords
 element (#41191)

---
 docs/reference/analysis/analyzers/standard-analyzer.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
index 3097ece21db..5117763b668 100644
--- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
@@ -132,7 +132,7 @@ The `standard` analyzer accepts the following parameters:
 `stopwords`::
 
     A pre-defined stop words list like `_english_` or an array  containing a
-    list of stop words.  Defaults to `\_none_`.
+    list of stop words.  Defaults to `_none_`.
 
 `stopwords_path`::
 

From 9e94db1b6a49f3a22b372ad2c328ebab8b5133d8 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Mon, 15 Apr 2019 15:24:01 +0300
Subject: [PATCH 020/112] Mute failing test

Tracked in #41172
---
 .../rest-api-spec/test/mustache/50_webhook_url_escaping.yml    | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml
index 01326f9764f..e11809a79ba 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml
+++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml
@@ -1,5 +1,8 @@
 ---
 "Test url escaping with url mustache function":
+  - skip:
+        version: "all"
+        reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/41172"
   - do:
       cluster.health:
           wait_for_status: yellow

From 23e40c040ae663892b4880372697d857ecc775a7 Mon Sep 17 00:00:00 2001
From: Guilherme Ferreira <guilhermeaferreira_t@yahoo.com.br>
Date: Mon, 15 Apr 2019 15:10:57 +0200
Subject: [PATCH 021/112] [Docs] Correct spelling of "_none_" (#41192)

---
 docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
index 3167a4342ac..259bf785b5b 100644
--- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
@@ -78,4 +78,4 @@ Elasticsearch provides the following predefined list of languages:
 `_portuguese_`, `_romanian_`, `_russian_`, `_sorani_`, `_spanish_`,
 `_swedish_`, `_thai_`, `_turkish_`.
 
-For the empty stopwords list (to disable stopwords) use: `\_none_`.
+For the empty stopwords list (to disable stopwords) use: `_none_`.

From 3f00c29adb41e6f94a44da03607c0c3c93c8e6cb Mon Sep 17 00:00:00 2001
From: David Roberts <dave.roberts@elastic.co>
Date: Mon, 15 Apr 2019 14:30:30 +0100
Subject: [PATCH 022/112] [ML] Allow xpack.ml.max_machine_memory_percent higher
 than 100% (#41193)

Values higher than 100% are now allowed to accommodate use
cases where swapping has been determined to be acceptable.
Anomaly detector jobs only use their full model memory
during background persistence, and this is deliberately
staggered, so with large numbers of jobs few will generally
be persisting state at the same time.  Settings higher than
available memory are only recommended for OEM type
situations where a wrapper tightly controls the types of
jobs that can be created, and each job alone is considerably
smaller than what each node can handle.
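
For illustration (not part of this patch), a minimal Java sketch of how the
widened [5, 200] range behaves when the setting is read back; it assumes the
x-pack ML classes are on the classpath and the value 150 is arbitrary:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.xpack.ml.MachineLearning;

    public class MaxMemoryPercentSketch {
        public static void main(String[] args) {
            // Values above 100% are now accepted; anything outside [5, 200]
            // makes Setting#get throw an IllegalArgumentException.
            Settings settings = Settings.builder()
                .put(MachineLearning.MAX_MACHINE_MEMORY_PERCENT.getKey(), 150)
                .build();
            int percent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings);
            System.out.println("effective max_machine_memory_percent = " + percent); // 150
        }
    }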
---
 .../xpack/ml/MachineLearning.java             |  8 ++++++-
 .../xpack/ml/MachineLearningTests.java        | 23 +++++++++++++++++++
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index b69e7b786a7..281a374b97b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
@@ -271,8 +271,14 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu
     public static final String MACHINE_MEMORY_NODE_ATTR = "ml.machine_memory";
     public static final Setting<Integer> CONCURRENT_JOB_ALLOCATIONS =
             Setting.intSetting("xpack.ml.node_concurrent_job_allocations", 2, 0, Property.Dynamic, Property.NodeScope);
+    // Values higher than 100% are allowed to accommodate use cases where swapping has been determined to be acceptable.
+    // Anomaly detector jobs only use their full model memory during background persistence, and this is deliberately
+    // staggered, so with large numbers of jobs few will generally be persisting state at the same time.
+    // Settings higher than available memory are only recommended for OEM type situations where a wrapper tightly
+    // controls the types of jobs that can be created, and each job alone is considerably smaller than what each node
+    // can handle.
     public static final Setting<Integer> MAX_MACHINE_MEMORY_PERCENT =
-            Setting.intSetting("xpack.ml.max_machine_memory_percent", 30, 5, 90, Property.Dynamic, Property.NodeScope);
+            Setting.intSetting("xpack.ml.max_machine_memory_percent", 30, 5, 200, Property.Dynamic, Property.NodeScope);
     public static final Setting<Integer> MAX_LAZY_ML_NODES =
             Setting.intSetting("xpack.ml.max_lazy_ml_nodes", 0, 0, 3, Property.Dynamic, Property.NodeScope);
 
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java
index 2c296691c24..9504cbe7a70 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java
@@ -31,6 +31,29 @@ public class MachineLearningTests extends ESTestCase {
         assertEquals(7, maxOpenWorkers);
     }
 
+    public void testMaxMachineMemoryPercent_givenDefault() {
+        int maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(Settings.EMPTY);
+        assertEquals(30, maxMachineMemoryPercent);
+    }
+
+    public void testMaxMachineMemoryPercent_givenValidSetting() {
+        Settings.Builder settings = Settings.builder();
+        int expectedMaxMachineMemoryPercent = randomIntBetween(5, 200);
+        settings.put(MachineLearning.MAX_MACHINE_MEMORY_PERCENT.getKey(), expectedMaxMachineMemoryPercent);
+        int maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings.build());
+        assertEquals(expectedMaxMachineMemoryPercent, maxMachineMemoryPercent);
+    }
+
+    public void testMaxMachineMemoryPercent_givenInvalidSetting() {
+        Settings.Builder settings = Settings.builder();
+        int invalidMaxMachineMemoryPercent = randomFrom(4, 201);
+        settings.put(MachineLearning.MAX_MACHINE_MEMORY_PERCENT.getKey(), invalidMaxMachineMemoryPercent);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings.build()));
+        assertThat(e.getMessage(), startsWith("Failed to parse value [" + invalidMaxMachineMemoryPercent
+            + "] for setting [xpack.ml.max_machine_memory_percent] must be"));
+    }
+
     public void testNoAttributes_givenNoClash() {
         Settings.Builder builder = Settings.builder();
         if (randomBoolean()) {

From 2b539f83474cd3566d2b713102aa1cf6b88a3abf Mon Sep 17 00:00:00 2001
From: David Kyle <david.kyle@elastic.co>
Date: Mon, 15 Apr 2019 14:33:16 +0100
Subject: [PATCH 023/112] [ML DataFrame] Data Frame stop all (#41156)

Wildcard support for the data frame stop API
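
For illustration (not part of this patch), a minimal Java sketch of the new
request/task matching contract once the wildcard has been expanded; the
transform ids are hypothetical and the x-pack data frame classes are assumed
to be on the classpath:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;

    import org.elasticsearch.tasks.Task;
    import org.elasticsearch.tasks.TaskId;
    import org.elasticsearch.xpack.core.dataframe.DataFrameField;
    import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Request;

    public class StopAllMatchSketch {
        public static void main(String[] args) {
            // The transport action expands "_all" (or a wildcard) into concrete ids and
            // stores them on the request; Request#match then only accepts data frame
            // tasks whose persistent-task description carries one of those ids.
            Request request = new Request("_all", false, false, null);
            request.setExpandedIds(new HashSet<>(Arrays.asList("transform-1", "transform-2")));

            Task transformTask = new Task(1L, "persistent", "action",
                    DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + "transform-1",
                    TaskId.EMPTY_TASK_ID, Collections.emptyMap());

            System.out.println(request.match(transformTask)); // true: id is in the expanded set
        }
    }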
---
 .../dataframe/stop_data_frame.asciidoc        |  4 +-
 .../data-frames/apis/stop-transform.asciidoc  | 12 +++-
 .../action/StopDataFrameTransformAction.java  | 36 ++++++++++--
 ...pDataFrameTransformActionRequestTests.java | 34 +++++++++++-
 ...TransportStopDataFrameTransformAction.java | 51 ++++++++++++++---
 ...portStopDataFrameTransformActionTests.java | 55 +++++++++++++++++++
 .../test/data_frame/transforms_start_stop.yml | 42 ++++++++++++++
 7 files changed, 218 insertions(+), 16 deletions(-)
 create mode 100644 x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java

diff --git a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc
index c91c228e0ea..9b05687c008 100644
--- a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc
+++ b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc
@@ -12,7 +12,9 @@ It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
 ==== Stop Data Frame Request
 
-A +{request}+ object requires a non-null `id`.
+A +{request}+ object requires a non-null `id`. `id` can be a comma-separated list of ids
+or a single id. Wildcards, `*` and `_all` are also accepted.
+
 
 ["source","java",subs="attributes,callouts,macros"]
 ---------------------------------------------------
diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/data-frames/apis/stop-transform.asciidoc
index 179646ddb47..2eb5a2d641b 100644
--- a/docs/reference/data-frames/apis/stop-transform.asciidoc
+++ b/docs/reference/data-frames/apis/stop-transform.asciidoc
@@ -10,9 +10,17 @@ Stops one or more {dataframe-transforms}.
 
 ==== Request
 
-`POST _data_frame/transforms/<data_frame_transform_id>/_stop`
+`POST _data_frame/transforms/<data_frame_transform_id>/_stop` +
 
-//==== Description
+`POST _data_frame/transforms/<data_frame_transform_id1>,<data_frame_transform_id2>/_stop` +
+
+`POST _data_frame/transforms/_all/_stop`
+
+
+==== Description
+You can stop multiple {dataframe-transforms} in a single API request by using a
+comma-separated list of {dataframe-transforms} or a wildcard expression.
+All {dataframe-transforms} can be stopped by using `_all` or `*` as the `<data_frame_transform_id>`.
 
 ==== Path Parameters
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java
index 7fa437bd156..54153aab91c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java
@@ -21,8 +21,11 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField;
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Objects;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 public class StopDataFrameTransformAction extends Action<StopDataFrameTransformAction.Response> {
@@ -45,6 +48,7 @@ public class StopDataFrameTransformAction extends Action<StopDataFrameTransformA
         private String id;
         private final boolean waitForCompletion;
         private final boolean force;
+        private Set<String> expandedIds;
 
         public Request(String id, boolean waitForCompletion, boolean force, @Nullable TimeValue timeout) {
             this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName());
@@ -64,6 +68,9 @@ public class StopDataFrameTransformAction extends Action<StopDataFrameTransformA
             id = in.readString();
             waitForCompletion = in.readBoolean();
             force = in.readBoolean();
+            if (in.readBoolean()) {
+                expandedIds = new HashSet<>(Arrays.asList(in.readStringArray()));
+            }
         }
 
         public String getId() {
@@ -82,12 +89,25 @@ public class StopDataFrameTransformAction extends Action<StopDataFrameTransformA
             return force;
         }
 
+        public Set<String> getExpandedIds() {
+            return expandedIds;
+        }
+
+        public void setExpandedIds(Set<String> expandedIds ) {
+            this.expandedIds = expandedIds;
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeString(id);
             out.writeBoolean(waitForCompletion);
             out.writeBoolean(force);
+            boolean hasExpandedIds = expandedIds != null;
+            out.writeBoolean(hasExpandedIds);
+            if (hasExpandedIds) {
+                out.writeStringArray(expandedIds.toArray(new String[0]));
+            }
         }
 
         @Override
@@ -98,7 +118,7 @@ public class StopDataFrameTransformAction extends Action<StopDataFrameTransformA
         @Override
         public int hashCode() {
             // the base class does not implement hashCode, therefore we need to hash timeout ourselves
-            return Objects.hash(id, waitForCompletion, force, this.getTimeout());
+            return Objects.hash(id, waitForCompletion, force, expandedIds, this.getTimeout());
         }
 
         @Override
@@ -118,15 +138,21 @@ public class StopDataFrameTransformAction extends Action<StopDataFrameTransformA
             }
 
             return Objects.equals(id, other.id) &&
-                Objects.equals(waitForCompletion, other.waitForCompletion) &&
-                Objects.equals(force, other.force);
+                    Objects.equals(waitForCompletion, other.waitForCompletion) &&
+                    Objects.equals(force, other.force) &&
+                    Objects.equals(expandedIds, other.expandedIds);
         }
 
         @Override
         public boolean match(Task task) {
-            String expectedDescription = DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id;
+            if (task.getDescription().startsWith(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX)) {
+                String id = task.getDescription().substring(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX.length());
+                if (expandedIds != null) {
+                    return expandedIds.contains(id);
+                }
+            }
 
-            return task.getDescription().equals(expectedDescription);
+            return false;
         }
     }
 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java
index c117e249aef..618b6420a77 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java
@@ -8,15 +8,26 @@ package org.elasticsearch.xpack.core.dataframe.action;
 
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.dataframe.DataFrameField;
 import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Request;
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
 public class StopDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase<Request> {
 
     @Override
     protected Request createTestInstance() {
         TimeValue timeout = randomBoolean() ? TimeValue.timeValueMinutes(randomIntBetween(1, 10)) : null;
-        return new Request(randomAlphaOfLengthBetween(1, 10), randomBoolean(), randomBoolean(), timeout);
+        Request request = new Request(randomAlphaOfLengthBetween(1, 10), randomBoolean(), randomBoolean(), timeout);
+        if (randomBoolean()) {
+            request.setExpandedIds(new HashSet<>(Arrays.asList(generateRandomStringArray(5, 6, false))));
+        }
+        return request;
     }
 
     @Override
@@ -35,4 +46,25 @@ public class StopDataFrameTransformActionRequestTests extends AbstractWireSerial
         assertNotEquals(r1,r2);
         assertNotEquals(r1.hashCode(),r2.hashCode());
     }
+
+    public void testMatch() {
+        String dataFrameId = "dataframe-id";
+
+        Task dataFrameTask = new Task(1L, "persistent", "action",
+                DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + dataFrameId,
+                TaskId.EMPTY_TASK_ID, Collections.emptyMap());
+
+        Request request = new Request("unrelated", false, false, null);
+        request.setExpandedIds(new HashSet<>(Arrays.asList("foo", "bar")));
+        assertFalse(request.match(dataFrameTask));
+
+        Request matchingRequest = new Request(dataFrameId, false, false, null);
+        matchingRequest.setExpandedIds(Collections.singleton(dataFrameId));
+        assertTrue(matchingRequest.match(dataFrameTask));
+
+        Task notADataFrameTask = new Task(1L, "persistent", "action",
+                "some other task, say monitoring",
+                TaskId.EMPTY_TASK_ID, Collections.emptyMap());
+        assertFalse(matchingRequest.match(notADataFrameTask));
+    }
 }
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java
index 2234226a501..2092493caaf 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java
@@ -13,20 +13,27 @@ import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.tasks.TransportTasksAction;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.action.util.PageParams;
+import org.elasticsearch.xpack.core.dataframe.DataFrameField;
 import org.elasticsearch.xpack.core.dataframe.DataFrameMessages;
 import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction;
 import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState;
 import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager;
 import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask;
 
+import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import static org.elasticsearch.ExceptionsHelper.convertToElastic;
 import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
@@ -52,17 +59,28 @@ public class TransportStopDataFrameTransformAction extends
     @Override
     protected void doExecute(Task task, StopDataFrameTransformAction.Request request,
             ActionListener<StopDataFrameTransformAction.Response> listener) {
-        // Need to verify that the config actually exists
-        dataFrameTransformsConfigManager.getTransformConfiguration(request.getId(), ActionListener.wrap(
-            config -> super.doExecute(task, request, listener),
-            listener::onFailure
+
+        dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap(
+                expandedIds -> {
+                    request.setExpandedIds(new HashSet<>(expandedIds));
+                    request.setNodes(dataframeNodes(expandedIds, clusterService.state()));
+                    super.doExecute(task, request, listener);
+                },
+                listener::onFailure
         ));
     }
 
     @Override
     protected void taskOperation(StopDataFrameTransformAction.Request request, DataFrameTransformTask transformTask,
             ActionListener<StopDataFrameTransformAction.Response> listener) {
-        if (transformTask.getTransformId().equals(request.getId())) {
+
+        Set<String> ids = request.getExpandedIds();
+        if (ids == null) {
+            listener.onFailure(new IllegalStateException("Request does not have expandedIds set"));
+            return;
+        }
+
+        if (ids.contains(transformTask.getTransformId())) {
             if (transformTask.getState().getTaskState() == DataFrameTransformTaskState.FAILED && request.isForce() == false) {
                 listener.onFailure(
                     new ElasticsearchStatusException("Unable to stop data frame transform [" + request.getId()
@@ -138,9 +156,28 @@ public class TransportStopDataFrameTransformAction extends
             }
         }
 
-        assert tasks.size() == 1;
-
         boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped);
         return new StopDataFrameTransformAction.Response(allStopped);
     }
+
+     static String[] dataframeNodes(List<String> dataFrameIds, ClusterState clusterState) {
+
+        Set<String> executorNodes = new HashSet<>();
+
+        PersistentTasksCustomMetaData tasksMetaData =
+                PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterState);
+
+        if (tasksMetaData != null) {
+            Set<String> dataFrameIdsSet = new HashSet<>(dataFrameIds);
+
+            Collection<PersistentTasksCustomMetaData.PersistentTask<?>> tasks =
+                tasksMetaData.findTasks(DataFrameField.TASK_NAME, t -> dataFrameIdsSet.contains(t.getId()));
+
+            for (PersistentTasksCustomMetaData.PersistentTask<?> task : tasks) {
+                executorNodes.add(task.getExecutorNode());
+            }
+        }
+
+        return executorNodes.toArray(new String[0]);
+    }
 }
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java
new file mode 100644
index 00000000000..4be532de2a2
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.action;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.dataframe.DataFrameField;
+import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform;
+import org.elasticsearch.xpack.core.ml.MlTasks;
+import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.hamcrest.Matchers.hasItemInArray;
+
+public class TransportStopDataFrameTransformActionTests extends ESTestCase {
+
+    public void testDataframeNodes() {
+        String dataFrameIdFoo = "df-id-foo";
+        String dataFrameIdBar = "df-id-bar";
+
+        PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
+        tasksBuilder.addTask(dataFrameIdFoo,
+                DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdFoo),
+                new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment"));
+        tasksBuilder.addTask(dataFrameIdBar,
+                DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdBar),
+                new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment"));
+        tasksBuilder.addTask(MlTasks.jobTaskId("foo-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"),
+                new PersistentTasksCustomMetaData.Assignment("node-3", "test assignment"));
+
+        ClusterState cs = ClusterState.builder(new ClusterName("_name"))
+                .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()))
+                .build();
+
+        String[] nodes = TransportStopDataFrameTransformAction.dataframeNodes(Arrays.asList(dataFrameIdFoo, dataFrameIdBar), cs);
+        assertEquals(2, nodes.length);
+        assertThat(nodes, hasItemInArray("node-1"));
+        assertThat(nodes, hasItemInArray("node-2"));
+    }
+
+    public void testDataframeNodes_NoTasks() {
+        ClusterState emptyState = ClusterState.builder(new ClusterName("_name")).build();
+        String[] nodes = TransportStopDataFrameTransformAction.dataframeNodes(Collections.singletonList("df-id"), emptyState);
+        assertEquals(0, nodes.length);
+    }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml
index 23a28e14a86..96f6b6d0a41 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml
@@ -179,3 +179,45 @@ teardown:
   - do:
       data_frame.delete_data_frame_transform:
         transform_id: "airline-transform-start-later"
+
+---
+"Test stop all":
+  - do:
+      data_frame.put_data_frame_transform:
+        transform_id: "airline-transform-stop-all"
+        body: >
+          {
+            "source": { "index": "airline-data" },
+            "dest": { "index": "airline-data-start-later" },
+            "pivot": {
+              "group_by": { "airline": {"terms": {"field": "airline"}}},
+              "aggs": {"avg_response": {"avg": {"field": "responsetime"}}}
+            }
+          }
+  - do:
+      data_frame.start_data_frame_transform:
+        transform_id: "airline-transform-stop-all"
+  - match: { started: true }
+
+  - do:
+      data_frame.start_data_frame_transform:
+        transform_id: "airline-transform-start-stop"
+  - match: { started: true }
+
+  - do:
+      data_frame.stop_data_frame_transform:
+        transform_id: "_all"
+  - match: { stopped: true }
+
+  - do:
+      data_frame.get_data_frame_transform_stats:
+        transform_id: "*"
+  - match: { count: 2 }
+  - match: { transforms.0.state.indexer_state: "stopped" }
+  - match: { transforms.0.state.task_state: "stopped" }
+  - match: { transforms.1.state.indexer_state: "stopped" }
+  - match: { transforms.1.state.task_state: "stopped" }
+
+  - do:
+      data_frame.delete_data_frame_transform:
+        transform_id: "airline-transform-stop-all"

From f19b052e0368ce8231687c6bfbd0479191fe6ce9 Mon Sep 17 00:00:00 2001
From: Zachary Tong <polyfractal@elastic.co>
Date: Mon, 15 Apr 2019 10:35:20 -0400
Subject: [PATCH 024/112] Better error messages when pipelines reference
 incompatible aggs (#40068)

Pipelines require a single-valued agg or a numeric value to be returned.
If they don't get that, they throw an exception.  Unfortunately, this
exception text is very confusing to users because it usually arises
from pathing "through" multiple terms aggs.  The final target is a numeric,
but it's the intermediary aggs that cause the problem.

This commit adds the current agg name to the exception message
so the user knows which "level" is the issue.
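
For illustration (not part of this patch), a simplified, self-contained sketch
of the name-selection rule that formatResolutionError applies (the containing
agg's name when the remaining path is empty, otherwise the first unresolved
path element); the agg names are hypothetical:

    import java.util.Arrays;
    import java.util.List;

    public class BucketsPathErrorSketch {
        // Mirrors the rule only; the real code lives in BucketHelpers and wraps the
        // chosen name in an AggregationExecutionException message.
        static String currentAggName(String containingAggName, List<String> remainingPath) {
            return remainingPath.isEmpty() ? containingAggName : remainingPath.get(0);
        }

        public static void main(String[] args) {
            System.out.println(currentAggName("the_terms", Arrays.asList("the_terms_2", "the_max")));
            // -> "the_terms_2": the intermediary terms agg the path walks through,
            //    not the final numeric target "the_max".
        }
    }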
---
 .../test/search.aggregation/300_pipeline.yml  | 100 ++++++++++
 .../aggregations/pipeline/BucketHelpers.java  |  30 ++-
 .../pipeline/BucketHelpersTests.java          | 185 ++++++++++++++++++
 3 files changed, 312 insertions(+), 3 deletions(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/300_pipeline.yml
 create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/300_pipeline.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/300_pipeline.yml
new file mode 100644
index 00000000000..0016c9f9894
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/300_pipeline.yml
@@ -0,0 +1,100 @@
+setup:
+  - skip:
+      version: " - 7.99.99" #TODO change this after backport
+      reason:  These new error messages were added in 7.1
+
+  - do:
+      indices.create:
+          index: test_1
+          body:
+            settings:
+              number_of_replicas: 0
+            mappings:
+              properties:
+                int_field:
+                   type : integer
+
+  - do:
+       bulk:
+         refresh: true
+         body:
+           - index:
+               _index: test_1
+               _id:    1
+           - int_field: 1
+           - index:
+               _index: test_1
+               _id:    2
+           - int_field: 2
+           - index:
+               _index: test_1
+               _id:    3
+           - int_field: 3
+           - index:
+               _index: test_1
+               _id:    4
+           - int_field: 4
+
+---
+"Max pipeline through terms agg":
+
+  - do:
+      catch: /\[Object\[\]\] at aggregation \[the_terms_2\]/
+      search:
+        rest_total_hits_as_int: true
+        body:
+          aggs:
+            the_terms:
+              terms:
+                field: "int_field"
+              aggs:
+                the_terms_2:
+                  terms:
+                    field: "int_field"
+                  aggs:
+                    the_max:
+                      max:
+                        field: "int_field"
+            the_bad_max:
+              max_bucket:
+                buckets_path: "the_terms>the_terms_2>the_max"
+
+---
+"Max pipeline on terms agg":
+
+  - do:
+      catch: /\[LongTerms\] at aggregation \[the_terms_2\]/
+      search:
+        rest_total_hits_as_int: true
+        body:
+          aggs:
+            the_terms:
+              terms:
+                field: "int_field"
+              aggs:
+                the_terms_2:
+                  terms:
+                    field: "int_field"
+            the_bad_max:
+              max_bucket:
+                buckets_path: "the_terms>the_terms_2"
+
+---
+"Max pipeline on percentiles agg without specifying percent":
+
+  - do:
+      catch: /buckets_path must reference either a number value or a single value numeric metric aggregation, but \[the_percentiles\] contains multiple values. Please specify which to use\./
+      search:
+        rest_total_hits_as_int: true
+        body:
+          aggs:
+            the_terms:
+              terms:
+                field: "int_field"
+              aggs:
+                the_percentiles:
+                  percentiles:
+                    field: "int_field"
+            the_bad_max:
+              max_bucket:
+                buckets_path: "the_terms>the_percentiles"
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java
index d2c973ebec2..1a863a20982 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpers.java
@@ -156,6 +156,7 @@ public class BucketHelpers {
             InternalMultiBucketAggregation.InternalBucket bucket, List<String> aggPathAsList, GapPolicy gapPolicy) {
         try {
             Object propertyValue = bucket.getProperty(agg.getName(), aggPathAsList);
+
             if (propertyValue == null) {
                 throw new AggregationExecutionException(AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName()
                         + " must reference either a number value or a single value numeric metric aggregation");
@@ -166,9 +167,7 @@ public class BucketHelpers {
                 } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) {
                     value = ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value();
                 } else {
-                    throw new AggregationExecutionException(AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName()
-                            + " must reference either a number value or a single value numeric metric aggregation, got: "
-                            + propertyValue.getClass().getCanonicalName());
+                    throw formatResolutionError(agg, aggPathAsList, propertyValue);
                 }
                 // doc count never has missing values so gap policy doesn't apply here
                 boolean isDocCountProperty = aggPathAsList.size() == 1 && "_count".equals(aggPathAsList.get(0));
@@ -188,4 +187,29 @@ public class BucketHelpers {
             return null;
         }
     }
+
+    /**
+     * Inspects where we are in the agg tree and tries to format a helpful error
+     */
+    private static AggregationExecutionException formatResolutionError(MultiBucketsAggregation agg,
+                                                                       List<String> aggPathAsList, Object propertyValue) {
+        String currentAggName;
+        Object currentAgg;
+        if (aggPathAsList.isEmpty()) {
+            currentAggName = agg.getName();
+            currentAgg = agg;
+        } else {
+            currentAggName = aggPathAsList.get(0);
+            currentAgg = propertyValue;
+        }
+        if (currentAgg instanceof InternalNumericMetricsAggregation.MultiValue) {
+            return new AggregationExecutionException(AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName()
+                + " must reference either a number value or a single value numeric metric aggregation, but [" + currentAggName
+                + "] contains multiple values. Please specify which to use.");
+        } else {
+            return new AggregationExecutionException(AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName()
+                + " must reference either a number value or a single value numeric metric aggregation, got: ["
+                + propertyValue.getClass().getSimpleName() + "] at aggregation [" + currentAggName + "]");
+        }
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java
new file mode 100644
index 00000000000..fbf8ad9d65a
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.mock;
+
+public class BucketHelpersTests extends ESTestCase {
+
+    public void testReturnsObjectArray() {
+
+        MultiBucketsAggregation agg = new MultiBucketsAggregation() {
+            @Override
+            public List<? extends Bucket> getBuckets() {
+                return null;
+            }
+
+            @Override
+            public String getName() {
+                return "foo";
+            }
+
+            @Override
+            public String getType() {
+                return null;
+            }
+
+            @Override
+            public Map<String, Object> getMetaData() {
+                return null;
+            }
+
+            @Override
+            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                return null;
+            }
+        };
+
+        InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() {
+            @Override
+            public void writeTo(StreamOutput out) throws IOException {
+
+            }
+
+            @Override
+            public Object getKey() {
+                return null;
+            }
+
+            @Override
+            public String getKeyAsString() {
+                return null;
+            }
+
+            @Override
+            public long getDocCount() {
+                return 0;
+            }
+
+            @Override
+            public Aggregations getAggregations() {
+                return null;
+            }
+
+            @Override
+            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                return null;
+            }
+
+            @Override
+            public Object getProperty(String containingAggName, List<String> path) {
+                return new Object[0];
+            }
+        };
+
+        AggregationExecutionException e = expectThrows(AggregationExecutionException.class,
+            () -> BucketHelpers.resolveBucketValue(agg, bucket, "foo>bar", BucketHelpers.GapPolicy.SKIP));
+
+        assertThat(e.getMessage(), equalTo("buckets_path must reference either a number value or a single value numeric " +
+            "metric aggregation, got: [Object[]] at aggregation [foo]"));
+    }
+
+    public void testReturnMultiValueObject() {
+
+        MultiBucketsAggregation agg = new MultiBucketsAggregation() {
+            @Override
+            public List<? extends Bucket> getBuckets() {
+                return null;
+            }
+
+            @Override
+            public String getName() {
+                return "foo";
+            }
+
+            @Override
+            public String getType() {
+                return null;
+            }
+
+            @Override
+            public Map<String, Object> getMetaData() {
+                return null;
+            }
+
+            @Override
+            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                return null;
+            }
+        };
+
+        InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() {
+            @Override
+            public void writeTo(StreamOutput out) throws IOException {
+
+            }
+
+            @Override
+            public Object getKey() {
+                return null;
+            }
+
+            @Override
+            public String getKeyAsString() {
+                return null;
+            }
+
+            @Override
+            public long getDocCount() {
+                return 0;
+            }
+
+            @Override
+            public Aggregations getAggregations() {
+                return null;
+            }
+
+            @Override
+            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                return null;
+            }
+
+            @Override
+            public Object getProperty(String containingAggName, List<String> path) {
+                return mock(InternalTDigestPercentiles.class);
+            }
+        };
+
+        AggregationExecutionException e = expectThrows(AggregationExecutionException.class,
+            () -> BucketHelpers.resolveBucketValue(agg, bucket, "foo>bar", BucketHelpers.GapPolicy.SKIP));
+
+        assertThat(e.getMessage(), equalTo("buckets_path must reference either a number value or a single value numeric " +
+            "metric aggregation, but [foo] contains multiple values. Please specify which to use."));
+    }
+}

From 454148eee61e98dc905340ab6e6f2124dda18380 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Mon, 15 Apr 2019 11:27:38 -0400
Subject: [PATCH 025/112] Fix intervals section of auto date-histogram docs
 (#41203)

This section should be at the same sub-level as other sections in the
auto date-histogram docs; otherwise it is rendered onto another page
and users find it hard to tell what it refers to.
---
 .../aggregations/bucket/autodatehistogram-aggregation.asciidoc  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc
index e371674228b..dfc4f62a91b 100644
--- a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc
@@ -89,7 +89,7 @@ Response:
 --------------------------------------------------
 // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
 
-=== Intervals
+==== Intervals
 
 The interval of the returned buckets is selected based on the data collected by the 
 aggregation so that the number of buckets returned is less than or equal to the number 

From 636b9338a64eb67c610ded7ad89ea96ca6248e83 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Mon, 15 Apr 2019 18:48:05 +0300
Subject: [PATCH 026/112] Fix the rpm and deb names for version starting with
 7.0.0 (#41198)

With the 7.0.0 release, we switched to downloading the packages instead of
using locally built ones.
This PR fixes the artifact names to include the architecture as
introduced in the 7.0.0 release.
---
 .../gradle/vagrant/VagrantTestPlugin.groovy          | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
index 763b5509772..935c3dd39ff 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
@@ -214,9 +214,15 @@ class VagrantTestPlugin implements Plugin<Project> {
             } else {
                 UPGRADE_FROM_ARCHIVES.each {
                     // The version of elasticsearch that we upgrade *from*
-                    dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}")
-                    if (upgradeFromVersion.onOrAfter('6.3.0')) {
-                        dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}")
+                    if (upgradeFromVersion.onOrAfter('7.0.0')) {
+                        String arch = it == "rpm" ? "x86_64" : "amd64"
+                        dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}-${arch}@${it}")
+                        dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}-${arch}@${it}")
+                    } else {
+                        dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}")
+                        if (upgradeFromVersion.onOrAfter('6.3.0')) {
+                            dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}")
+                        }
                     }
                 }
             }

From f56b2ecb3761229408f32c4ce1966c3c3651b3e5 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen <martijn.v.groningen@gmail.com>
Date: Mon, 15 Apr 2019 19:14:43 +0200
Subject: [PATCH 027/112] Remove xpack dependencies from qa rest modules
 (#41134) (7.x backport) (#41202)

This commit removes xpack dependencies from many xpack qa modules.
(For some qa modules this will require some more work.)

The reason behind this change is that qa rest modules should not depend
on the x-pack plugins, because the plugins are an implementation detail and
the tests should only know about the rest interface and qa cluster that is
being tested.

Also, some qa modules rely on xpack plugins, and hlrc (which is a valid
dependency for rest qa tests) then creates a cyclic dependency, which is
something that we should avoid. Eclipse also can't handle gradle cyclic
dependencies (see #41064).

* Don't copy xpack-core's plugin properties into the test resources of qa
modules; otherwise installing the security manager fails because it tries
to find the XPackPlugin class.
---
 .../support}/WatcherTemplateTests.java        |  2 +-
 x-pack/qa/build.gradle                        |  7 +++
 .../build.gradle                              |  3 +-
 ...CoreWithSecurityClientYamlTestSuiteIT.java |  2 +-
 x-pack/qa/full-cluster-restart/build.gradle   | 27 ++---------
 .../xpack/restart/FullClusterRestartIT.java   | 47 +++++++++----------
 .../MlMigrationFullClusterRestartIT.java      |  6 +--
 .../build.gradle                              |  4 +-
 ...sterSearchWithSecurityYamlTestSuiteIT.java |  2 +-
 x-pack/qa/multi-node/build.gradle             |  2 +-
 .../GlobalCheckpointSyncActionIT.java         |  2 +-
 .../elasticsearch/multi_node/RollupIT.java    |  2 +-
 x-pack/qa/rolling-upgrade-basic/build.gradle  | 13 +----
 .../build.gradle                              |  4 +-
 x-pack/qa/rolling-upgrade/build.gradle        | 14 +-----
 .../upgrades/AbstractUpgradeTestCase.java     |  6 +--
 .../upgrades/MlMappingsUpgradeIT.java         |  6 +--
 .../UpgradeClusterClientYamlTestSuiteIT.java  |  3 +-
 x-pack/qa/saml-idp-tests/build.gradle         |  7 +--
 .../build.gradle                              |  4 +-
 .../MonitoringWithWatcherRestIT.java          | 33 +++++++------
 x-pack/qa/smoke-test-plugins/build.gradle     |  2 +-
 ...SmokeTestPluginsClientYamlTestSuiteIT.java |  2 +-
 .../build.gradle                              |  5 +-
 ...rityWithMustacheClientYamlTestSuiteIT.java |  2 +-
 .../build.gradle                              |  2 +-
 ...cherWithSecurityClientYamlTestSuiteIT.java |  6 +--
 .../SmokeTestWatcherWithSecurityIT.java       |  6 +--
 x-pack/qa/smoke-test-watcher/build.gradle     |  5 +-
 .../SmokeTestWatcherTestSuiteIT.java          |  6 +--
 .../smoketest/WatcherRestIT.java              |  4 +-
 .../test/SecuritySettingsSourceField.java     | 42 +++++++++++++++++
 .../test/rest/XPackRestTestConstants.java     | 43 +++++++++++++++++
 .../xpack/test/rest/XPackRestTestHelper.java  | 25 +++-------
 34 files changed, 186 insertions(+), 160 deletions(-)
 rename x-pack/{qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest => plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support}/WatcherTemplateTests.java (99%)
 create mode 100644 x-pack/qa/src/main/java/org/elasticsearch/xpack/test/SecuritySettingsSourceField.java
 create mode 100644 x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java
 rename x-pack/{plugin/core/src/test => qa/src/main}/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java (75%)

diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherTemplateTests.java
similarity index 99%
rename from x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateTests.java
rename to x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherTemplateTests.java
index df98e731187..8c45bc15b4c 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherTemplateTests.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.smoketest;
+package org.elasticsearch.xpack.watcher.support;
 
 import com.fasterxml.jackson.core.io.JsonStringEncoder;
 import org.elasticsearch.common.Nullable;
diff --git a/x-pack/qa/build.gradle b/x-pack/qa/build.gradle
index 7d705d5b0dc..2555b0ef729 100644
--- a/x-pack/qa/build.gradle
+++ b/x-pack/qa/build.gradle
@@ -3,6 +3,13 @@
 
 import org.elasticsearch.gradle.test.RestIntegTestTask
 
+apply plugin: 'elasticsearch.build'
+test.enabled = false
+
+dependencies {
+  compile project(':test:framework')
+}
+
 subprojects {
   // HACK: please fix this
   // we want to add the rest api specs for xpack to qa tests, but we
diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle
index dbf5aa6fc22..72b62f94b55 100644
--- a/x-pack/qa/core-rest-tests-with-security/build.gradle
+++ b/x-pack/qa/core-rest-tests-with-security/build.gradle
@@ -2,8 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
-  testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
+  testCompile project(':x-pack:qa')
 }
 
 integTest {
diff --git a/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java
index 212a342479d..1d98c4fd59c 100644
--- a/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java
+++ b/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java
@@ -18,7 +18,7 @@ import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 
 import java.util.Objects;
 
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 
 @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) // as default timeout seems not enough on the jenkins VMs
 public class CoreWithSecurityClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle
index 964cc2fb43c..7f0e14d2a53 100644
--- a/x-pack/qa/full-cluster-restart/build.gradle
+++ b/x-pack/qa/full-cluster-restart/build.gradle
@@ -10,22 +10,12 @@ apply plugin: 'elasticsearch.standalone-test'
 test.enabled = false
 
 dependencies {
-    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-    testCompile project(path: xpackModule('core'), configuration: 'default')
-    testCompile (project(path: xpackModule('security'), configuration: 'runtime')) {
-        // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency.
-        // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper
-        exclude group: "com.google.guava", module: "guava"
-    }
-    testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
-
+    // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core.
+    // Currently needed for ml tests are using the building for datafeed and job config)
     testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
-    testCompile (project(path: xpackModule('security'), configuration: 'testArtifacts')) {
-        // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency.
-        // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper
-        exclude group: "com.google.guava", module: "guava"
-    }
+
     testCompile project(path: ':qa:full-cluster-restart', configuration: 'testArtifacts')
+    testCompile project(':x-pack:qa')
 }
 
 Closure waitWithAuth = { NodeInfo node, AntBuilder ant ->
@@ -225,11 +215,4 @@ task copyXPackRestSpec(type: Copy) {
     include 'rest-api-spec/api/**'
     into project.sourceSets.test.output.resourcesDir
 }
-
-task copyXPackPluginProps(type: Copy) {
-    dependsOn(copyXPackRestSpec)
-    from project(xpackModule('core')).file('src/main/plugin-metadata')
-    from project(xpackModule('core')).tasks.pluginProperties
-    into outputDir
-}
-project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
+project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackRestSpec)
diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
index 9e4e5cfb2e7..322e97db765 100644
--- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
@@ -13,7 +13,6 @@ import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.ObjectPath;
-import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.document.RestGetAction;
@@ -22,16 +21,6 @@ import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.test.StreamsUtils;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase;
-import org.elasticsearch.xpack.core.upgrade.UpgradeField;
-import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
-import org.elasticsearch.xpack.security.support.SecurityIndexManager;
-import org.elasticsearch.xpack.watcher.actions.index.IndexAction;
-import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction;
-import org.elasticsearch.xpack.watcher.common.text.TextTemplate;
-import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition;
-import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest;
-import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;
-import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger;
 import org.hamcrest.Matcher;
 import org.junit.Before;
 
@@ -61,6 +50,15 @@ import static org.hamcrest.Matchers.startsWith;
 
 public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
+    public static final String INDEX_ACTION_TYPES_DEPRECATION_MESSAGE =
+        "[types removal] Specifying types in a watcher index action is deprecated.";
+
+    public static final String SEARCH_INPUT_TYPES_DEPRECATION_MESSAGE =
+        "[types removal] Specifying types in a watcher search request is deprecated.";
+
+    public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6;
+    public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6;
+
     private String type;
 
     @Before
@@ -123,7 +121,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
                 if (settingsMap.containsKey("index")) {
                     @SuppressWarnings("unchecked")
                     int format = Integer.parseInt(String.valueOf(((Map<String, Object>)settingsMap.get("index")).get("format")));
-                    assertEquals("The security index needs to be upgraded", SecurityIndexManager.INTERNAL_INDEX_FORMAT, format);
+                    assertEquals("The security index needs to be upgraded", SECURITY_EXPECTED_INDEX_FORMAT_VERSION, format);
                 }
             }
 
@@ -144,8 +142,8 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             Request createBwcWatch = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_watch");
             Request createBwcThrottlePeriod = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_throttle_period");
             if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) {
-                createBwcWatch.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE));
-                createBwcThrottlePeriod.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE));
+                createBwcWatch.setOptions(expectWarnings(INDEX_ACTION_TYPES_DEPRECATION_MESSAGE));
+                createBwcThrottlePeriod.setOptions(expectWarnings(INDEX_ACTION_TYPES_DEPRECATION_MESSAGE));
             }
             createBwcWatch.setJsonEntity(loadWatch("simple-watch.json"));
             client().performRequest(createBwcWatch);
@@ -183,7 +181,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
                 logger.info("settings map {}", settingsMap);
                 if (settingsMap.containsKey("index")) {
                     int format = Integer.parseInt(String.valueOf(((Map<String, Object>)settingsMap.get("index")).get("format")));
-                    assertEquals("The watches index needs to be upgraded", UpgradeField.EXPECTED_INDEX_FORMAT_VERSION, format);
+                    assertEquals("The watches index needs to be upgraded", UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION, format);
                 }
             }
 
@@ -450,14 +448,14 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         if (getOldClusterVersion().before(Version.V_7_0_0)) {
             getRequest.setOptions(
                 expectWarnings(
-                    IndexAction.TYPES_DEPRECATION_MESSAGE,
-                    WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE
+                    INDEX_ACTION_TYPES_DEPRECATION_MESSAGE,
+                    SEARCH_INPUT_TYPES_DEPRECATION_MESSAGE
                 )
             );
         } else {
             getRequest.setOptions(
                 expectWarnings(
-                    IndexAction.TYPES_DEPRECATION_MESSAGE
+                    INDEX_ACTION_TYPES_DEPRECATION_MESSAGE
                 )
             );
         }
@@ -480,14 +478,14 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         if (getOldClusterVersion().before(Version.V_7_0_0)) {
             getRequest.setOptions(
                 expectWarnings(
-                    IndexAction.TYPES_DEPRECATION_MESSAGE,
-                    WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE
+                    INDEX_ACTION_TYPES_DEPRECATION_MESSAGE,
+                    SEARCH_INPUT_TYPES_DEPRECATION_MESSAGE
                 )
             );
         } else {
             getRequest.setOptions(
                 expectWarnings(
-                    IndexAction.TYPES_DEPRECATION_MESSAGE
+                    INDEX_ACTION_TYPES_DEPRECATION_MESSAGE
                 )
             );
         }
@@ -529,10 +527,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
     private void assertBasicWatchInteractions() throws Exception {
 
-        String watch = new WatchSourceBuilder()
-                .condition(InternalAlwaysCondition.INSTANCE)
-                .trigger(ScheduleTrigger.builder(new IntervalSchedule(IntervalSchedule.Interval.seconds(1))))
-                .addAction("awesome", LoggingAction.builder(new TextTemplate("test"))).buildAsBytes(XContentType.JSON).utf8ToString();
+        String watch = "{\"trigger\":{\"schedule\":{\"interval\":\"1s\"}},\"input\":{\"none\":{}}," +
+            "\"condition\":{\"always\":{}}," +
+            "\"actions\":{\"awesome\":{\"logging\":{\"level\":\"info\",\"text\":\"test\"}}}}";
         Request createWatchRequest = new Request("PUT", "_watcher/watch/new_watch");
         createWatchRequest.setJsonEntity(watch);
         Map<String, Object> createWatch = entityAsMap(client().performRequest(createWatchRequest));
diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java
index b881af65420..10bf89a31ef 100644
--- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java
@@ -24,6 +24,7 @@ import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig;
 import org.elasticsearch.xpack.core.ml.job.config.DataDescription;
 import org.elasticsearch.xpack.core.ml.job.config.Detector;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
 import org.junit.Before;
 
@@ -56,14 +57,13 @@ public class MlMigrationFullClusterRestartIT extends AbstractFullClusterRestartT
 
     @Before
     public void waitForMlTemplates() throws Exception {
-        List<String> templatesToWaitFor = XPackRestTestHelper.ML_POST_V660_TEMPLATES;
+        List<String> templatesToWaitFor = XPackRestTestConstants.ML_POST_V660_TEMPLATES;
 
         // If upgrading from a version prior to v6.6.0 the set of templates
         // to wait for is different
         if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_6_0) ) {
-                templatesToWaitFor = XPackRestTestHelper.ML_PRE_V660_TEMPLATES;
+                templatesToWaitFor = XPackRestTestConstants.ML_PRE_V660_TEMPLATES;
         }
-
         XPackRestTestHelper.waitForTemplates(client(), templatesToWaitFor);
     }
 
diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle
index c31b2c0ad1d..8406345575d 100644
--- a/x-pack/qa/multi-cluster-search-security/build.gradle
+++ b/x-pack/qa/multi-cluster-search-security/build.gradle
@@ -3,9 +3,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
 apply plugin: 'elasticsearch.standalone-test'
 
 dependencies {
-  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-  testCompile project(path: xpackModule('core'), configuration: 'default')
-  testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
+  testCompile project(':x-pack:qa')
 }
 
 task remoteClusterTest(type: RestIntegTestTask) {
diff --git a/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java b/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java
index e61ff9062d1..011fe5be928 100644
--- a/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java
+++ b/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java
@@ -14,7 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 
 public class MultiClusterSearchWithSecurityYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle
index 243a6f40438..28de4a4c114 100644
--- a/x-pack/qa/multi-node/build.gradle
+++ b/x-pack/qa/multi-node/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
+  testCompile project(':x-pack:qa')
 }
 
 integTestCluster {
diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java
index 9d3e88cbc5c..02dd7ecb40c 100644
--- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java
+++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java
@@ -16,7 +16,7 @@ import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.equalTo;
 
 public class GlobalCheckpointSyncActionIT extends ESRestTestCase {
diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java
index feddc57f6e0..a5579ad0aa5 100644
--- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java
+++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java
@@ -32,7 +32,7 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.isOneOf;
 
diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle
index 770dfffce7c..4d8da870e33 100644
--- a/x-pack/qa/rolling-upgrade-basic/build.gradle
+++ b/x-pack/qa/rolling-upgrade-basic/build.gradle
@@ -4,9 +4,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
 apply plugin: 'elasticsearch.standalone-test'
 
 dependencies {
-    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-    testCompile project(path: xpackModule('core'), configuration: 'default')
-    testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit
+    testCompile project(':x-pack:qa')
 }
 
 // This is a top level task which we will add dependencies to below.
@@ -143,14 +141,7 @@ task copyXPackRestSpec(type: Copy) {
     include 'rest-api-spec/api/**'
     into project.sourceSets.test.output.resourcesDir
 }
-
-task copyXPackPluginProps(type: Copy) {
-    dependsOn(copyXPackRestSpec)
-    from project(xpackModule('core')).file('src/main/plugin-metadata')
-    from project(xpackModule('core')).tasks.pluginProperties
-    into outputDir
-}
-project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
+project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackRestSpec)
 
 repositories {
     maven {
diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle
index 2715b4e7024..0d137af673c 100644
--- a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle
+++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle
@@ -4,9 +4,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
 apply plugin: 'elasticsearch.standalone-test'
 
 dependencies {
-    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-    testCompile project(path: xpackModule('core'), configuration: 'default')
-    testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit
+    testCompile project(':x-pack:qa')
 }
 
 // This is a top level task which we will add dependencies to below.
diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle
index a29cbe00ffd..56daa59367d 100644
--- a/x-pack/qa/rolling-upgrade/build.gradle
+++ b/x-pack/qa/rolling-upgrade/build.gradle
@@ -10,10 +10,7 @@ apply plugin: 'elasticsearch.standalone-test'
 test.enabled = false
 
 dependencies {
-  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-  testCompile project(path: xpackModule('core'), configuration: 'default')
-  testCompile project(path: xpackModule('security'), configuration: 'runtime')
-  testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit
+  testCompile project(':x-pack:qa')
   testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}")
 }
 
@@ -298,11 +295,4 @@ task copyXPackRestSpec(type: Copy) {
     include 'rest-api-spec/api/**'
     into project.sourceSets.test.output.resourcesDir
 }
-
-task copyXPackPluginProps(type: Copy) {
-    dependsOn(copyXPackRestSpec)
-    from project(xpackModule('core')).file('src/main/plugin-metadata')
-    from project(xpackModule('core')).tasks.pluginProperties
-    into outputDir
-}
-project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
+project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackRestSpec)
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java
index 0d0f05bcf9c..64c3a785d14 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java
@@ -8,7 +8,7 @@ package org.elasticsearch.upgrades;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.test.SecuritySettingsSourceField;
+import org.elasticsearch.xpack.test.SecuritySettingsSourceField;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.Before;
 
@@ -16,12 +16,12 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 
 public abstract class AbstractUpgradeTestCase extends ESRestTestCase {
 
     private static final String BASIC_AUTH_VALUE =
-            basicAuthHeaderValue("test_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING);
+            basicAuthHeaderValue("test_user", SecuritySettingsSourceField.TEST_PASSWORD);
 
     @Override
     protected boolean preserveIndicesUponCompletion() {
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java
index 4bded9a25c5..13ed2dafc5f 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java
@@ -14,7 +14,7 @@ import org.elasticsearch.client.ml.job.config.Detector;
 import org.elasticsearch.client.ml.job.config.Job;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
 
 import java.io.IOException;
@@ -30,7 +30,7 @@ public class MlMappingsUpgradeIT extends AbstractUpgradeTestCase {
 
     @Override
     protected Collection<String> templatesToWaitFor() {
-        return Stream.concat(XPackRestTestHelper.ML_POST_V660_TEMPLATES.stream(),
+        return Stream.concat(XPackRestTestConstants.ML_POST_V660_TEMPLATES.stream(),
             super.templatesToWaitFor().stream()).collect(Collectors.toSet());
     }
 
@@ -81,7 +81,7 @@ public class MlMappingsUpgradeIT extends AbstractUpgradeTestCase {
     private void assertUpgradedMappings() throws Exception {
 
         assertBusy(() -> {
-            Request getMappings = new Request("GET", AnomalyDetectorsIndex.resultsWriteAlias(JOB_ID) + "/_mappings");
+            Request getMappings = new Request("GET", XPackRestTestHelper.resultsWriteAlias(JOB_ID) + "/_mappings");
             Response response = client().performRequest(getMappings);
 
             Map<String, Object> responseLevel = entityAsMap(response);
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
index 9374346449c..33f256ac1e1 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
@@ -13,6 +13,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
 import org.junit.Before;
 
@@ -28,7 +29,7 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa
      */
     @Before
     public void waitForTemplates() throws Exception {
-        XPackRestTestHelper.waitForTemplates(client(), XPackRestTestHelper.ML_POST_V660_TEMPLATES);
+        XPackRestTestHelper.waitForTemplates(client(), XPackRestTestConstants.ML_POST_V660_TEMPLATES);
     }
 
     @Override
diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle
index 7b76321fe9d..4355ac0b5b8 100644
--- a/x-pack/qa/saml-idp-tests/build.gradle
+++ b/x-pack/qa/saml-idp-tests/build.gradle
@@ -6,10 +6,7 @@ apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'elasticsearch.test.fixtures'
 
 dependencies {
-    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-    testCompile project(path: xpackModule('core'), configuration: 'default')
     testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
-    testCompile project(path: xpackModule('security'), configuration: 'testArtifacts')
     testCompile 'com.google.jimfs:jimfs:1.1'
 }
 testFixtures.useFixture ":x-pack:test:idp-fixture"
@@ -103,9 +100,7 @@ thirdPartyAudit {
       'com.google.common.cache.Striped64$1',
       'com.google.common.cache.Striped64$Cell',
       'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
-      'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
-      'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
-      'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1'
+      'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1'
    )
 
    ignoreMissingClasses ( 
diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle
index 57be337f634..18bf5656f19 100644
--- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle
+++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle
@@ -2,9 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
-  testCompile project(path: xpackModule('watcher'))
-  testCompile project(path: xpackModule('monitoring'))
+  testCompile project(':x-pack:qa')
 }
 
 integTestCluster {
diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
index 66b52afed16..40ed71df842 100644
--- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
+++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
@@ -9,28 +9,32 @@ import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
-import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil;
-import org.elasticsearch.xpack.watcher.actions.ActionBuilders;
-import org.elasticsearch.xpack.watcher.client.WatchSourceBuilders;
-import org.elasticsearch.xpack.watcher.trigger.TriggerBuilders;
-import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;
 import org.junit.After;
 
 import java.io.IOException;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput;
-import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval.Unit.MINUTES;
 import static org.hamcrest.Matchers.is;
 
 @TestLogging("org.elasticsearch.client:TRACE,tracer:TRACE")
 @AwaitsFix(bugUrl = "flaky tests")
 public class MonitoringWithWatcherRestIT extends ESRestTestCase {
 
+    /**
+     * An unsorted list of Watch IDs representing resource files for Monitoring Cluster Alerts.
+     */
+    public static final String[] WATCH_IDS = {
+        "elasticsearch_cluster_status",
+        "elasticsearch_version_mismatch",
+        "kibana_version_mismatch",
+        "logstash_version_mismatch",
+        "xpack_license_expiration",
+        "elasticsearch_nodes",
+    };
+
     @After
     public void cleanExporters() throws Exception {
         Request request = new Request("PUT", "/_cluster/settings");
@@ -53,7 +57,7 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {
                 .endObject().endObject()));
         adminClient().performRequest(request);
 
-        assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length);
+        assertTotalWatchCount(WATCH_IDS.length);
 
         assertMonitoringWatchHasBeenOverWritten(watchId);
     }
@@ -71,7 +75,7 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {
                 .endObject().endObject()));
         adminClient().performRequest(request);
 
-        assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length);
+        assertTotalWatchCount(WATCH_IDS.length);
 
         assertMonitoringWatchHasBeenOverWritten(watchId);
     }
@@ -95,11 +99,10 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {
         String clusterUUID = getClusterUUID();
         String watchId = clusterUUID + "_kibana_version_mismatch";
         Request request = new Request("PUT", "/_watcher/watch/" + watchId);
-        request.setJsonEntity(WatchSourceBuilders.watchBuilder()
-                .trigger(TriggerBuilders.schedule(new IntervalSchedule(new IntervalSchedule.Interval(1000, MINUTES))))
-                .input(simpleInput())
-                .addAction("logme", ActionBuilders.loggingAction("foo"))
-                .buildAsBytes(XContentType.JSON).utf8ToString());
+        String watch = "{\"trigger\":{\"schedule\":{\"interval\":\"1000m\"}},\"input\":{\"simple\":{}}," +
+            "\"condition\":{\"always\":{}}," +
+            "\"actions\":{\"logme\":{\"logging\":{\"level\":\"info\",\"text\":\"foo\"}}}}";
+        request.setJsonEntity(watch);
         client().performRequest(request);
         return watchId;
     }
diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle
index 3b7661eeeb0..5aa3adbdf09 100644
--- a/x-pack/qa/smoke-test-plugins/build.gradle
+++ b/x-pack/qa/smoke-test-plugins/build.gradle
@@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
+  testCompile project(':x-pack:qa')
 }
 
 ext.pluginsCount = 0
diff --git a/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java
index 29671386f5b..6a676be7430 100644
--- a/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java
@@ -14,7 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 
 public class XSmokeTestPluginsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
diff --git a/x-pack/qa/smoke-test-security-with-mustache/build.gradle b/x-pack/qa/smoke-test-security-with-mustache/build.gradle
index 48b525ba3da..bf2e6c32573 100644
--- a/x-pack/qa/smoke-test-security-with-mustache/build.gradle
+++ b/x-pack/qa/smoke-test-security-with-mustache/build.gradle
@@ -2,10 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-  testCompile project(path: xpackModule('core'), configuration: 'default')
-  testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
-  testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
+  testCompile project(':x-pack:qa')
 }
 
 integTestCluster {
diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java
index 46daddae62b..f65fd64be29 100644
--- a/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java
@@ -14,7 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 
 public class SmokeTestSecurityWithMustacheClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle
index 2ff2ff9272f..0b622fc446b 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle
+++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle
@@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
+  testCompile project(':x-pack:qa')
 }
 
 // bring in watcher rest test suite
diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
index 879be233fa1..679bc08f01f 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java
@@ -14,7 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
-import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.junit.After;
 import org.junit.Before;
 
@@ -23,7 +23,7 @@ import java.util.Collections;
 import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonMap;
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.is;
 
 public class SmokeTestWatcherWithSecurityClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
@@ -76,7 +76,7 @@ public class SmokeTestWatcherWithSecurityClientYamlTestSuiteIT extends ESClientY
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) {
+            for (String template : XPackRestTestConstants.TEMPLATE_NAMES_NO_ILM) {
                 ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
                         singletonMap("name", template), emptyList(), emptyMap());
                 assertThat(templateExistsResponse.getStatusCode(), is(200));
diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
index 226ebc29ec7..394c63b3723 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
+++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
@@ -16,7 +16,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
-import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.junit.After;
 import org.junit.Before;
 
@@ -26,7 +26,7 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasEntry;
@@ -83,7 +83,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase {
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) {
+            for (String template : XPackRestTestConstants.TEMPLATE_NAMES) {
                 assertOK(adminClient().performRequest(new Request("HEAD", "_template/" + template)));
             }
         });
diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle
index fb2e4c06ced..9194c46daed 100644
--- a/x-pack/qa/smoke-test-watcher/build.gradle
+++ b/x-pack/qa/smoke-test-watcher/build.gradle
@@ -2,10 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 dependencies {
-  testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
-  testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
-  testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
-  testCompile project(path: ':modules:lang-painless', configuration: 'runtime')
+  testCompile project(':x-pack:qa')
 }
 
 integTestCluster {
diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java
index 8f30ec41711..3df9512298e 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java
+++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java
@@ -14,7 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
-import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.junit.After;
 import org.junit.Before;
 
@@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.test.SecuritySettingsSourceField.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.is;
@@ -63,7 +63,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase {
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) {
+            for (String template : XPackRestTestConstants.TEMPLATE_NAMES_NO_ILM) {
                 Response templateExistsResponse = adminClient().performRequest(new Request("HEAD", "/_template/" + template));
                 assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200));
             }
diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java
index 19c82c8cef7..2dd5cc86a89 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java
+++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java
@@ -10,7 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
-import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
+import org.elasticsearch.xpack.test.rest.XPackRestTestConstants;
 import org.junit.After;
 import org.junit.Before;
 
@@ -58,7 +58,7 @@ public class WatcherRestIT extends ESClientYamlSuiteTestCase {
         });
 
         assertBusy(() -> {
-            for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) {
+            for (String template : XPackRestTestConstants.TEMPLATE_NAMES_NO_ILM) {
                 ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template",
                     singletonMap("name", template), emptyList(), emptyMap());
                 assertThat(templateExistsResponse.getStatusCode(), is(200));
diff --git a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/SecuritySettingsSourceField.java b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/SecuritySettingsSourceField.java
new file mode 100644
index 00000000000..8747d780989
--- /dev/null
+++ b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/SecuritySettingsSourceField.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.test;
+
+import org.elasticsearch.common.CharArrays;
+import org.elasticsearch.common.settings.SecureString;
+
+import java.nio.CharBuffer;
+import java.util.Arrays;
+import java.util.Base64;
+
+public final class SecuritySettingsSourceField {
+    public static final SecureString TEST_PASSWORD_SECURE_STRING = new SecureString("x-pack-test-password".toCharArray());
+    public static final String TEST_PASSWORD = "x-pack-test-password";
+
+    private SecuritySettingsSourceField() {}
+
+    public static String basicAuthHeaderValue(String username, String passwd) {
+        return basicAuthHeaderValue(username, new SecureString(passwd.toCharArray()));
+    }
+
+    public static String basicAuthHeaderValue(String username, SecureString passwd) {
+        CharBuffer chars = CharBuffer.allocate(username.length() + passwd.length() + 1);
+        byte[] charBytes = null;
+        try {
+            chars.put(username).put(':').put(passwd.getChars());
+            charBytes = CharArrays.toUtf8Bytes(chars.array());
+
+            //TODO we still have passwords in Strings in headers. Maybe we can look into using a CharSequence?
+            String basicToken = Base64.getEncoder().encodeToString(charBytes);
+            return "Basic " + basicToken;
+        } finally {
+            Arrays.fill(chars.array(), (char) 0);
+            if (charBytes != null) {
+                Arrays.fill(charBytes, (byte) 0);
+            }
+        }
+    }
+}
diff --git a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java
new file mode 100644
index 00000000000..478a2d384a2
--- /dev/null
+++ b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.test.rest;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+public final class XPackRestTestConstants {
+
+    // Watcher constants:
+    public static final String INDEX_TEMPLATE_VERSION = "9";
+    public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION;
+    public static final String HISTORY_TEMPLATE_NAME_NO_ILM = ".watch-history-no-ilm-" + INDEX_TEMPLATE_VERSION;
+    public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches";
+    public static final String WATCHES_TEMPLATE_NAME = ".watches";
+    public static final String[] TEMPLATE_NAMES = new String[] {
+        HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME
+    };
+    public static final String[] TEMPLATE_NAMES_NO_ILM = new String[] {
+        HISTORY_TEMPLATE_NAME_NO_ILM, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME
+    };
+
+    // ML constants:
+    public static final String ML_META_INDEX_NAME = ".ml-meta";
+    public static final String AUDITOR_NOTIFICATIONS_INDEX = ".ml-notifications";
+    public static final String CONFIG_INDEX = ".ml-config";
+    public static final String RESULTS_INDEX_PREFIX = ".ml-anomalies-";
+    public static final String STATE_INDEX_PREFIX = ".ml-state";
+    public static final String RESULTS_INDEX_DEFAULT = "shared";
+
+    public static final List<String> ML_PRE_V660_TEMPLATES = Collections.unmodifiableList(Arrays.asList(
+        AUDITOR_NOTIFICATIONS_INDEX, ML_META_INDEX_NAME, STATE_INDEX_PREFIX, RESULTS_INDEX_PREFIX));
+
+    public static final List<String> ML_POST_V660_TEMPLATES = Collections.unmodifiableList(Arrays.asList(
+        AUDITOR_NOTIFICATIONS_INDEX, ML_META_INDEX_NAME, STATE_INDEX_PREFIX, RESULTS_INDEX_PREFIX, CONFIG_INDEX));
+
+    private XPackRestTestConstants() {
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java
similarity index 75%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java
rename to x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java
index 28d0ccd682f..6ad16d512ef 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java
+++ b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java
@@ -14,14 +14,8 @@ import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.ml.MlMetaIndex;
-import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
-import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields;
-import org.elasticsearch.xpack.core.ml.notifications.AuditorField;
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
@@ -30,19 +24,6 @@ import static org.elasticsearch.test.rest.ESRestTestCase.allowTypesRemovalWarnin
 
 public final class XPackRestTestHelper {
 
-    public static final List<String> ML_PRE_V660_TEMPLATES = Collections.unmodifiableList(
-            Arrays.asList(AuditorField.NOTIFICATIONS_INDEX,
-                    MlMetaIndex.INDEX_NAME,
-                    AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX,
-                    AnomalyDetectorsIndex.jobResultsIndexPrefix()));
-
-    public static final List<String> ML_POST_V660_TEMPLATES = Collections.unmodifiableList(
-            Arrays.asList(AuditorField.NOTIFICATIONS_INDEX,
-                    MlMetaIndex.INDEX_NAME,
-                    AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX,
-                    AnomalyDetectorsIndex.jobResultsIndexPrefix(),
-                    AnomalyDetectorsIndex.configIndexName()));
-
     private XPackRestTestHelper() {
     }
 
@@ -95,4 +76,10 @@ public final class XPackRestTestHelper {
             });
         }
     }
+
+    public static String resultsWriteAlias(String jobId) {
+        // ".write" rather than simply "write" to avoid the danger of clashing
+        // with the read alias of a job whose name begins with "write-"
+        return XPackRestTestConstants.RESULTS_INDEX_PREFIX + ".write-" + jobId;
+    }
 }

From db13043d3bad170a73668323e4bf7729f805bbef Mon Sep 17 00:00:00 2001
From: Julie Tibshirani <julie.tibshirani@elastic.co>
Date: Tue, 9 Apr 2019 10:49:01 -0700
Subject: [PATCH 028/112] Some clarifications in the 'enabled' documentation.
 (#40989)

This PR makes a few clarifications to the docs for the `enabled` setting:
- Replace references to 'mapping type' with 'mapping' or 'mapping definition'.
- In code examples, clarify that the disabled fields have type `object`.
- Add a section on how disabled fields can hold non-object data.
---
 .../reference/mapping/params/enabled.asciidoc | 39 +++++++++++++++----
 1 file changed, 32 insertions(+), 7 deletions(-)

diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc
index 06b76ddeae0..7193c6aa9f6 100644
--- a/docs/reference/mapping/params/enabled.asciidoc
+++ b/docs/reference/mapping/params/enabled.asciidoc
@@ -7,11 +7,11 @@ you are using Elasticsearch as a web session store.  You may want to index the
 session ID and last update time, but you don't need to query or run
 aggregations on the session data itself.
 
-The `enabled` setting, which can be applied only to the mapping type and to
-<<object,`object`>> fields, causes Elasticsearch to skip parsing of the
-contents of the field entirely.  The JSON can still be retrieved from the
-<<mapping-source-field,`_source`>> field, but it is not searchable or stored
-in any other way:
+The `enabled` setting, which can be applied only to the top-level mapping
+definition and to <<object,`object`>> fields, causes Elasticsearch to skip
+parsing of the contents of the field entirely.  The JSON can still be retrieved
+from the <<mapping-source-field,`_source`>> field, but it is not searchable or
+stored in any other way:
 
 [source,js]
 --------------------------------------------------
@@ -26,6 +26,7 @@ PUT my_index
         "type": "date"
       },
       "session_data": { <1>
+        "type": "object",
         "enabled": false
       }
     }
@@ -55,7 +56,7 @@ PUT my_index/_doc/session_2
 <2> Any arbitrary data can be passed to the `session_data` field as it will be entirely ignored.
 <3> The `session_data` will also ignore values that are not JSON objects.
 
-The entire mapping type may be disabled as well, in which case the document is
+The entire mapping may be disabled as well, in which case the document is
 stored in the <<mapping-source-field,`_source`>> field, which means it can be
 retrieved, but none of its contents are indexed in any way:
 
@@ -84,10 +85,34 @@ GET my_index/_doc/session_1 <2>
 GET my_index/_mapping <3>
 --------------------------------------------------
 // CONSOLE
-<1> The entire mapping type is disabled.
+<1> The entire mapping is disabled.
 <2> The document can be retrieved.
 <3> Checking the mapping reveals that no fields have been added.
 
 TIP: The `enabled` setting can be updated on existing fields
 using the <<indices-put-mapping,PUT mapping API>>.
 
+Note that because Elasticsearch completely skips parsing the field
+contents, it is possible to add non-object data to a disabled field:
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "mappings": {
+    "properties": {
+      "session_data": {
+        "type": "object",
+        "enabled": false
+      }
+    }
+  }
+}
+
+PUT my_index/_doc/session_1
+{
+  "session_data": "foo bar" <1>
+}
+--------------------------------------------------
+// CONSOLE
+
+<1> The document is added successfully, even though `session_data` contains non-object data.
\ No newline at end of file

From 7e59794ced8ca858c2fbf0991fe8efb1228eb24b Mon Sep 17 00:00:00 2001
From: Gordon Brown <gordon.brown@elastic.co>
Date: Mon, 15 Apr 2019 16:20:37 -0600
Subject: [PATCH 029/112] Log every use of ILM Move to Step API (#41171)

Usage of the ILM Move to Step API can result in some very odd
situations, and for diagnosing problems arising from these situations it
would be nice to have a record of when this API was called with what
parameters.

Also adds a dedicated logger for TransportMoveToStepAction,
rather than using the (deprecated) inherited one.
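
For illustration only, a minimal sketch of the dedicated-logger pattern this
change applies (the stub class and String parameters below are placeholders,
not the actual transport action):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class MoveToStepLoggingSketch {
        // A per-class static logger instead of the deprecated one inherited
        // from the transport action base class.
        private static final Logger logger = LogManager.getLogger(MoveToStepLoggingSketch.class);

        void logMove(String index, String currentStepKey, String nextStepKey, String policy) {
            // Record every use of the move-to-step API together with its parameters.
            logger.info("moving index [{}] from [{}] to [{}] in policy [{}]",
                index, currentStepKey, nextStepKey, policy);
        }
    }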
---
 .../xpack/indexlifecycle/IndexLifecycleRunner.java            | 3 +++
 .../indexlifecycle/action/TransportMoveToStepAction.java      | 4 ++++
 2 files changed, 7 insertions(+)

diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java
index f6c068d945d..05ad342f3e7 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java
@@ -307,6 +307,9 @@ public class IndexLifecycleRunner {
                 "] with policy [" + indexPolicySetting + "] does not exist");
         }
 
+        logger.info("moving index [{}] from [{}] to [{}] in policy [{}]",
+            indexName, currentStepKey, nextStepKey, indexPolicySetting);
+
         return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey,
             nextStepKey, nowSupplier, forcePhaseDefinitionRefresh);
     }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java
index 57f08eba764..e5cb15f6c3b 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java
@@ -6,6 +6,8 @@
 
 package org.elasticsearch.xpack.indexlifecycle.action;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -25,6 +27,8 @@ import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Respo
 import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService;
 
 public class TransportMoveToStepAction extends TransportMasterNodeAction<Request, Response> {
+    private static final Logger logger = LogManager.getLogger(TransportMoveToStepAction.class);
+
     IndexLifecycleService indexLifecycleService;
     @Inject
     public TransportMoveToStepAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool,

From 56c00eecbc0697cf102c8f6d6e04b8e66017eae9 Mon Sep 17 00:00:00 2001
From: Tim Brooks <tim@uncontended.net>
Date: Mon, 15 Apr 2019 16:54:24 -0600
Subject: [PATCH 030/112] Remove string usages of old transport settings
 (#41207)

This is related to #36652. We intend to deprecate a number of transport
settings in 7.x and remove them in 8.0. This commit removes the string
usages of these settings.
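
As a rough sketch of the replacement logic (using a plain major/minor pair and
a Map for the node config; the real change goes through the build tooling's
Version handling, which is not shown here):

    import java.util.HashMap;
    import java.util.Map;

    class TransportPortSettingSketch {
        // Nodes on 6.7.0 or later understand the new setting name; older nodes
        // still need the legacy transport.tcp.port key.
        static void putTransportPort(Map<String, String> config, int major, int minor, String port) {
            boolean onOrAfter670 = major > 6 || (major == 6 && minor >= 7);
            config.put(onOrAfter670 ? "transport.port" : "transport.tcp.port", port);
        }

        public static void main(String[] args) {
            Map<String, String> config = new HashMap<>();
            putTransportPort(config, 6, 7, "0");
            System.out.println(config); // prints {transport.port=0}
        }
    }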
---
 .../elasticsearch/gradle/test/ClusterFormationTasks.groovy  | 6 +++++-
 .../gradle/testclusters/ElasticsearchNode.java              | 6 +++++-
 .../discovery/single/SingleNodeDiscoveryIT.java             | 2 +-
 .../vagrant/src/test/resources/packaging/tests/certgen.bash | 2 +-
 4 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index 05333e47740..824cb161a63 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -412,7 +412,11 @@ class ClusterFormationTasks {
         }
         esConfig['node.max_local_storage_nodes'] = node.config.numNodes
         esConfig['http.port'] = node.config.httpPort
-        esConfig['transport.tcp.port'] =  node.config.transportPort
+        if (node.nodeVersion.onOrAfter('6.7.0')) {
+            esConfig['transport.port'] =  node.config.transportPort
+        } else {
+            esConfig['transport.tcp.port'] =  node.config.transportPort
+        }
         // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
         esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
         esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
index 337a9bdeac7..5a3e0d599f1 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
@@ -605,7 +605,11 @@ public class ElasticsearchNode implements TestClusterConfiguration {
         defaultConfig.put("node.attr.testattr", "test");
         defaultConfig.put("node.portsfile", "true");
         defaultConfig.put("http.port", "0");
-        defaultConfig.put("transport.tcp.port", "0");
+        if (Version.fromString(version).onOrAfter(Version.fromString("6.7.0"))) {
+            defaultConfig.put("transport.port", "0");
+        } else {
+            defaultConfig.put("transport.tcp.port", "0");
+        }
         // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
         defaultConfig.put("cluster.routing.allocation.disk.watermark.low", "1b");
         defaultConfig.put("cluster.routing.allocation.disk.watermark.high", "1b");
diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
index c3cae8f10ff..5d8e3407e18 100644
--- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
@@ -59,7 +59,7 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase {
                 .builder()
                 .put(super.nodeSettings(nodeOrdinal))
                 .put("discovery.type", "single-node")
-                .put("transport.tcp.port", "0")
+                .put("transport.port", "0")
                 .build();
     }
 
diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash b/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash
index c0ae9aac4db..13aed28b4c1 100644
--- a/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash
+++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash
@@ -259,7 +259,7 @@ xpack.security.http.ssl.certificate: $ESCONFIG/certs/node-master/node-master.crt
 xpack.security.http.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"]
 
 xpack.security.transport.ssl.enabled: true
-transport.tcp.port: 9300
+transport.port: 9300
 
 xpack.security.http.ssl.enabled: true
 http.port: 9200

From 750db02b54daf42c1616c605fefe177d592715d7 Mon Sep 17 00:00:00 2001
From: Shaunak Kashyap <ycombinator@gmail.com>
Date: Mon, 15 Apr 2019 20:17:05 -0700
Subject: [PATCH 031/112] Expand beats_system role privileges (#40876) (#41232)

Traditionally we have [recommended](https://www.elastic.co/guide/en/beats/filebeat/current/monitoring.html) that Beats send their monitoring data to the **production** Elasticsearch cluster. Beats do this by calling the `POST _monitoring/bulk` API. When Security is enabled this API call requires the `cluster:admin/xpack/monitoring/bulk` privilege. The built-in `beats_system` role has this privilege.

[Going forward](https://github.com/elastic/beats/pull/9260), Beats will be able to send their monitoring data directly to the **monitoring** Elasticsearch cluster. Beats will do this by calling the regular `POST _bulk` API. When Security is enabled this API call requires the `indices:data/write/bulk` privilege. Further, the call has to be able to create any indices that don't exist.

This PR expands the built-in `beats_system` role's privileges. Specifically, it adds index-level `write` and `create_index` privileges for `.monitoring-beats-*` indices.

This will allow Beats users to continue using the `beats_system` role for the new direct monitoring route when Security is enabled.
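
As a rough sketch of the index-level permissions this change is expected to grant (plain Java with a hypothetical `isAllowed` helper; the real role is defined via `RoleDescriptor` in the diff below):

```java
import java.util.Set;

public class BeatsSystemPrivilegesSketch {
    // Index privileges granted on ".monitoring-beats-*" in the diff below.
    private static final Set<String> GRANTED = Set.of("create_index", "create");

    // Hypothetical check: the new grant only covers monitoring indices written by Beats.
    static boolean isAllowed(String index, String privilege) {
        return index.startsWith(".monitoring-beats-") && GRANTED.contains(privilege);
    }

    public static void main(String[] args) {
        System.out.println(isAllowed(".monitoring-beats-7", "create"));  // true
        System.out.println(isAllowed(".monitoring-beats-7", "delete"));  // false
        System.out.println(isAllowed("logs-foo", "create"));             // false
    }
}
```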
---
 .../authz/store/ReservedRolesStore.java       |  7 +++-
 .../authz/store/ReservedRolesStoreTests.java  | 33 +++++++++++--------
 2 files changed, 26 insertions(+), 14 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
index d9fded1fb2b..b767b560861 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
@@ -133,7 +133,12 @@ public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListene
                     },
                     null, MetadataUtils.DEFAULT_RESERVED_METADATA))
                 .put(UsernamesField.BEATS_ROLE, new RoleDescriptor(UsernamesField.BEATS_ROLE,
-                        new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
+                        new String[] { "monitor", MonitoringBulkAction.NAME},
+                        new RoleDescriptor.IndicesPrivileges[]{
+                            RoleDescriptor.IndicesPrivileges.builder()
+                                .indices(".monitoring-beats-*").privileges("create_index", "create").build()
+                        },
+                    null, MetadataUtils.DEFAULT_RESERVED_METADATA))
                 .put(UsernamesField.APM_ROLE, new RoleDescriptor(UsernamesField.APM_ROLE,
                         new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
                 .put("apm_user", new RoleDescriptor("apm_user",
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
index c4c2ec871a5..625e5ddf47c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
@@ -838,23 +838,30 @@ public class ReservedRolesStoreTests extends ESTestCase {
         assertNotNull(roleDescriptor);
         assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true));
 
-        Role logstashSystemRole = Role.builder(roleDescriptor, null).build();
-        assertThat(logstashSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true));
-        assertThat(logstashSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true));
-        assertThat(logstashSystemRole.cluster().check(ClusterStatsAction.NAME, request), is(true));
-        assertThat(logstashSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false));
-        assertThat(logstashSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false));
-        assertThat(logstashSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false));
-        assertThat(logstashSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true));
+        Role beatsSystemRole = Role.builder(roleDescriptor, null).build();
+        assertThat(beatsSystemRole.cluster().check(ClusterHealthAction.NAME, request), is(true));
+        assertThat(beatsSystemRole.cluster().check(ClusterStateAction.NAME, request), is(true));
+        assertThat(beatsSystemRole.cluster().check(ClusterStatsAction.NAME, request), is(true));
+        assertThat(beatsSystemRole.cluster().check(PutIndexTemplateAction.NAME, request), is(false));
+        assertThat(beatsSystemRole.cluster().check(ClusterRerouteAction.NAME, request), is(false));
+        assertThat(beatsSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false));
+        assertThat(beatsSystemRole.cluster().check(MonitoringBulkAction.NAME, request), is(true));
 
-        assertThat(logstashSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false));
+        assertThat(beatsSystemRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false));
 
-        assertThat(logstashSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false));
-        assertThat(logstashSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false));
-        assertThat(logstashSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)),
+
+        final String index = ".monitoring-beats-" + randomIntBetween(0, 5);
+        logger.info("index name [{}]", index);
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false));
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false));
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)),
                 is(false));
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true));
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true));
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false));
+        assertThat(beatsSystemRole.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(true));
 
-        assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES);
+        assertNoAccessAllowed(beatsSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES);
     }
 
     public void testAPMSystemRole() {

From ad3b7abaa3db753ae0482b26bec2d0fb912b13c6 Mon Sep 17 00:00:00 2001
From: Tim Brooks <tim@uncontended.net>
Date: Mon, 15 Apr 2019 21:43:09 -0600
Subject: [PATCH 032/112] Deprecate old transport settings (#41229)

This is related to #36652. We intend to remove a number of old transport
settings in 8.0. This commit deprecates those settings for 7.x.
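
The mechanics, roughly: each legacy `transport.tcp.*` key keeps working but now carries `Setting.Property.Deprecated`, while the new key falls back to the old one when unset. A minimal sketch of that fallback pattern in plain Java (not the Elasticsearch `Setting` API):

```java
import java.util.Map;

public class FallbackSettingSketch {
    // Resolves newKey, falling back to the deprecated oldKey and then to a default.
    static String resolve(Map<String, String> settings, String newKey, String oldKey, String defaultValue) {
        if (settings.containsKey(newKey)) {
            return settings.get(newKey);
        }
        if (settings.containsKey(oldKey)) {
            // In Elasticsearch a deprecation warning is logged when the old key is used.
            System.err.println("setting [" + oldKey + "] is deprecated, use [" + newKey + "] instead");
            return settings.get(oldKey);
        }
        return defaultValue;
    }

    public static void main(String[] args) {
        Map<String, String> settings = Map.of("transport.tcp.port", "9300");
        // Prints 9300 and emits the deprecation note, since only the legacy key is set.
        System.out.println(resolve(settings, "transport.port", "transport.tcp.port", "9300-9400"));
    }
}
```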
---
 .../common/network/NetworkService.java        |  4 +--
 .../http/HttpTransportSettings.java           |  3 +-
 .../transport/TransportSettings.java          | 31 ++++++++-----------
 .../AbstractSimpleTransportTestCase.java      |  4 +--
 4 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java
index cde873fa577..babc83a1772 100644
--- a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java
+++ b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java
@@ -58,9 +58,9 @@ public final class NetworkService {
         Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
     public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
         Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
-        Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope);
+        Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope,
+            Setting.Property.Deprecated);
 
     /**
      * A custom name resolver can support custom lookup keys (my_net_key:ipv4) and also change
diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
index ddd8bfa7385..2a5639f2e72 100644
--- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
+++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
@@ -107,9 +107,8 @@ public final class HttpTransportSettings {
 
     // Tcp socket settings
 
-    // TODO: Deprecate in 7.0
     public static final Setting<Boolean> OLD_SETTING_HTTP_TCP_NO_DELAY =
-        boolSetting("http.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope);
+        boolSetting("http.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope, Setting.Property.Deprecated);
     public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY =
         boolSetting("http.tcp.no_delay", OLD_SETTING_HTTP_TCP_NO_DELAY, Setting.Property.NodeScope);
     public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE =
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
index 60e230004ca..253177836a9 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
@@ -52,9 +52,8 @@ public final class TransportSettings {
         listSetting("transport.bind_host", HOST, Function.identity(), Setting.Property.NodeScope);
     public static final Setting.AffixSetting<List<String>> BIND_HOST_PROFILE = affixKeySetting("transport.profiles.", "bind_host",
         key -> listSetting(key, BIND_HOST, Function.identity(), Setting.Property.NodeScope));
-    // TODO: Deprecate in 7.0
     public static final Setting<String> OLD_PORT =
-        new Setting<>("transport.tcp.port", "9300-9400", Function.identity(), Setting.Property.NodeScope);
+        new Setting<>("transport.tcp.port", "9300-9400", Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated);
     public static final Setting<String> PORT =
         new Setting<>("transport.port", OLD_PORT, Function.identity(), Setting.Property.NodeScope);
     public static final Setting.AffixSetting<String> PORT_PROFILE = affixKeySetting("transport.profiles.", "port",
@@ -63,31 +62,29 @@ public final class TransportSettings {
         intSetting("transport.publish_port", -1, -1, Setting.Property.NodeScope);
     public static final Setting.AffixSetting<Integer> PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port",
         key -> intSetting(key, -1, -1, Setting.Property.NodeScope));
-    // TODO: Deprecate in 7.0
     public static final Setting<Boolean> OLD_TRANSPORT_COMPRESS =
-        boolSetting("transport.tcp.compress", false, Setting.Property.NodeScope);
+        boolSetting("transport.tcp.compress", false, Setting.Property.NodeScope, Setting.Property.Deprecated);
     public static final Setting<Boolean> TRANSPORT_COMPRESS =
         boolSetting("transport.compress", OLD_TRANSPORT_COMPRESS, Setting.Property.NodeScope);
     // the scheduled internal ping interval setting, defaults to disabled (-1)
     public static final Setting<TimeValue> PING_SCHEDULE =
         timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
-        timeSetting("transport.tcp.connect_timeout", NetworkService.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope);
+        timeSetting("transport.tcp.connect_timeout", NetworkService.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope,
+            Setting.Property.Deprecated);
     public static final Setting<TimeValue> CONNECT_TIMEOUT =
         timeSetting("transport.connect_timeout", TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope);
     public static final Setting<Settings> DEFAULT_FEATURES_SETTING = Setting.groupSetting(FEATURE_PREFIX + ".", Setting.Property.NodeScope);
 
     // Tcp socket settings
 
-    // TODO: Deprecate in 7.0
     public static final Setting<Boolean> OLD_TCP_NO_DELAY =
-        boolSetting("transport.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope);
+        boolSetting("transport.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope, Setting.Property.Deprecated);
     public static final Setting<Boolean> TCP_NO_DELAY =
         boolSetting("transport.tcp.no_delay", OLD_TCP_NO_DELAY, Setting.Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting.AffixSetting<Boolean> OLD_TCP_NO_DELAY_PROFILE =
-        affixKeySetting("transport.profiles.", "tcp_no_delay", key -> boolSetting(key, TCP_NO_DELAY, Setting.Property.NodeScope));
+        affixKeySetting("transport.profiles.", "tcp_no_delay", key -> boolSetting(key, TCP_NO_DELAY, Setting.Property.NodeScope,
+            Setting.Property.Deprecated));
     public static final Setting.AffixSetting<Boolean> TCP_NO_DELAY_PROFILE =
         affixKeySetting("transport.profiles.", "tcp.no_delay",
             key -> boolSetting(key,
@@ -95,9 +92,9 @@ public final class TransportSettings {
                 Setting.Property.NodeScope));
     public static final Setting<Boolean> TCP_KEEP_ALIVE =
         boolSetting("transport.tcp.keep_alive", NetworkService.TCP_KEEP_ALIVE, Setting.Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting.AffixSetting<Boolean> OLD_TCP_KEEP_ALIVE_PROFILE =
-        affixKeySetting("transport.profiles.", "tcp_keep_alive", key -> boolSetting(key, TCP_KEEP_ALIVE, Setting.Property.NodeScope));
+        affixKeySetting("transport.profiles.", "tcp_keep_alive",
+            key -> boolSetting(key, TCP_KEEP_ALIVE, Setting.Property.NodeScope, Setting.Property.Deprecated));
     public static final Setting.AffixSetting<Boolean> TCP_KEEP_ALIVE_PROFILE =
         affixKeySetting("transport.profiles.", "tcp.keep_alive",
             key -> boolSetting(key,
@@ -105,9 +102,9 @@ public final class TransportSettings {
                 Setting.Property.NodeScope));
     public static final Setting<Boolean> TCP_REUSE_ADDRESS =
         boolSetting("transport.tcp.reuse_address", NetworkService.TCP_REUSE_ADDRESS, Setting.Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting.AffixSetting<Boolean> OLD_TCP_REUSE_ADDRESS_PROFILE =
-        affixKeySetting("transport.profiles.", "reuse_address", key -> boolSetting(key, TCP_REUSE_ADDRESS, Setting.Property.NodeScope));
+        affixKeySetting("transport.profiles.", "reuse_address", key -> boolSetting(key, TCP_REUSE_ADDRESS, Setting.Property.NodeScope,
+            Setting.Property.Deprecated));
     public static final Setting.AffixSetting<Boolean> TCP_REUSE_ADDRESS_PROFILE =
         affixKeySetting("transport.profiles.", "tcp.reuse_address",
             key -> boolSetting(key,
@@ -115,10 +112,9 @@ public final class TransportSettings {
                 Setting.Property.NodeScope));
     public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE =
         Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TCP_SEND_BUFFER_SIZE, Setting.Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting.AffixSetting<ByteSizeValue> OLD_TCP_SEND_BUFFER_SIZE_PROFILE =
         affixKeySetting("transport.profiles.", "send_buffer_size",
-            key -> Setting.byteSizeSetting(key, TCP_SEND_BUFFER_SIZE, Setting.Property.NodeScope));
+            key -> Setting.byteSizeSetting(key, TCP_SEND_BUFFER_SIZE, Setting.Property.NodeScope, Setting.Property.Deprecated));
     public static final Setting.AffixSetting<ByteSizeValue> TCP_SEND_BUFFER_SIZE_PROFILE =
         affixKeySetting("transport.profiles.", "tcp.send_buffer_size",
             key -> Setting.byteSizeSetting(key,
@@ -126,10 +122,9 @@ public final class TransportSettings {
                 Setting.Property.NodeScope));
     public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
         Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope);
-    // TODO: Deprecate in 7.0
     public static final Setting.AffixSetting<ByteSizeValue> OLD_TCP_RECEIVE_BUFFER_SIZE_PROFILE =
         affixKeySetting("transport.profiles.", "receive_buffer_size",
-            key -> Setting.byteSizeSetting(key, TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope));
+            key -> Setting.byteSizeSetting(key, TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope, Setting.Property.Deprecated));
     public static final Setting.AffixSetting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE_PROFILE =
         affixKeySetting("transport.profiles.", "tcp.receive_buffer_size",
             key -> Setting.byteSizeSetting(key,
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
index 32215b204fb..567607c6e15 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
@@ -2585,7 +2585,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
         Settings defaultProfileSettings = Settings.builder()
             .put("transport.profiles.default.tcp.no_delay", enable)
             .put("transport.profiles.default.tcp.keep_alive", enable)
-            .put("transport.profiles.default.reuse_address", enable)
+            .put("transport.profiles.default.tcp.reuse_address", enable)
             .put("transport.profiles.default.tcp.send_buffer_size", "43000b")
             .put("transport.profiles.default.tcp.receive_buffer_size", "42000b")
             .put("transport.profiles.default.port", "9700-9800")
@@ -2598,7 +2598,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
         Settings profileSettings = Settings.builder()
             .put("transport.profiles.some_profile.tcp.no_delay", enable)
             .put("transport.profiles.some_profile.tcp.keep_alive", enable)
-            .put("transport.profiles.some_profile.reuse_address", enable)
+            .put("transport.profiles.some_profile.tcp.reuse_address", enable)
             .put("transport.profiles.some_profile.tcp.send_buffer_size", "43000b")
             .put("transport.profiles.some_profile.tcp.receive_buffer_size", "42000b")
             .put("transport.profiles.some_profile.port", "9700-9800")

From 36a8c7aa0bf04aeeef82f4073ccb3456dd16163e Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Mon, 15 Apr 2019 19:22:10 +0100
Subject: [PATCH 033/112] Add 'DO NOT TOUCH' warnings to disco settings docs
 (#41211)

---
 .../discovery/discovery-settings.asciidoc     | 68 ++++++++++++-------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/docs/reference/modules/discovery/discovery-settings.asciidoc b/docs/reference/modules/discovery/discovery-settings.asciidoc
index 3386fd66b49..aaa39c9db62 100644
--- a/docs/reference/modules/discovery/discovery-settings.asciidoc
+++ b/docs/reference/modules/discovery/discovery-settings.asciidoc
@@ -25,6 +25,23 @@ Discovery and cluster formation are affected by the following settings:
     compatibility. Support for the old name will be removed in a future
     version.
 
+`cluster.initial_master_nodes`::
+
+    Sets a list of the <<node.name,node names>> or transport addresses of the
+    initial set of master-eligible nodes in a brand-new cluster. By default
+    this list is empty, meaning that this node expects to join a cluster that
+    has already been bootstrapped. See <<initial_master_nodes>>.
+
+[float]
+==== Expert settings
+
+Discovery and cluster formation are also affected by the following
+_expert-level_ settings, although it is not recommended to change any of these
+from their default values.
+
+[WARNING] If you adjust these settings then your cluster may not form correctly
+or may become unstable or intolerant of certain failures.
+
 `discovery.cluster_formation_warning_timeout`::
 
     Sets how long a node will try to form a cluster before logging a warning
@@ -49,6 +66,7 @@ Discovery and cluster formation are affected by the following settings:
     handshake. Defaults to `1s`.
 
 `discovery.request_peers_timeout`::
+
     Sets how long a node will wait after asking its peers again before
     considering the request to have failed. Defaults to `3s`.
 
@@ -83,73 +101,78 @@ Discovery and cluster formation are affected by the following settings:
 
     Sets the amount to increase the upper bound on the wait before an election
     on each election failure. Note that this is _linear_ backoff. This defaults
-    to `100ms`
+    to `100ms`. Changing this setting from the default may cause your cluster
+    to fail to elect a master node.
 
 `cluster.election.duration`::
 
-    Sets how long each election is allowed to take before a node considers it to
-    have failed and schedules a retry. This defaults to `500ms`.
+    Sets how long each election is allowed to take before a node considers it
+    to have failed and schedules a retry. This defaults to `500ms`.  Changing
+    this setting from the default may cause your cluster to fail to elect a
+    master node.
 
 `cluster.election.initial_timeout`::
 
     Sets the upper bound on how long a node will wait initially, or after the
     elected master fails, before attempting its first election. This defaults
-    to `100ms`.
-
+    to `100ms`. Changing this setting from the default may cause your cluster
+    to fail to elect a master node.
 
 `cluster.election.max_timeout`::
 
     Sets the maximum upper bound on how long a node will wait before attempting
     a first election, so that a network partition that lasts for a long time
-    does not result in excessively sparse elections. This defaults to `10s`
+    does not result in excessively sparse elections. This defaults to `10s`.
+    Changing this setting from the default may cause your cluster to fail to
+    elect a master node.
 
 [[fault-detection-settings]]`cluster.fault_detection.follower_check.interval`::
 
     Sets how long the elected master waits between follower checks to each
-    other node in the cluster. Defaults to `1s`.
+    other node in the cluster. Defaults to `1s`. Changing this setting from the
+    default may cause your cluster to become unstable.
 
 `cluster.fault_detection.follower_check.timeout`::
 
     Sets how long the elected master waits for a response to a follower check
-    before considering it to have failed. Defaults to `10s`.
+    before considering it to have failed. Defaults to `10s`. Changing this
+    setting from the default may cause your cluster to become unstable.
 
 `cluster.fault_detection.follower_check.retry_count`::
 
     Sets how many consecutive follower check failures must occur to each node
     before the elected master considers that node to be faulty and removes it
-    from the cluster. Defaults to `3`.
+    from the cluster. Defaults to `3`. Changing this setting from the default
+    may cause your cluster to become unstable.
 
 `cluster.fault_detection.leader_check.interval`::
 
     Sets how long each node waits between checks of the elected master.
-    Defaults to `1s`.
+    Defaults to `1s`. Changing this setting from the default may cause your
+    cluster to become unstable.
 
 `cluster.fault_detection.leader_check.timeout`::
 
     Sets how long each node waits for a response to a leader check from the
     elected master before considering it to have failed. Defaults to `10s`.
+    Changing this setting from the default may cause your cluster to become
+    unstable.
 
 `cluster.fault_detection.leader_check.retry_count`::
 
     Sets how many consecutive leader check failures must occur before a node
     considers the elected master to be faulty and attempts to find or elect a
-    new master. Defaults to `3`.
+    new master. Defaults to `3`. Changing this setting from the default may
+    cause your cluster to become unstable.
 
 `cluster.follower_lag.timeout`::
 
     Sets how long the master node waits to receive acknowledgements for cluster
-    state updates from lagging nodes. The default value is `90s`. If a node does
-    not successfully apply the cluster state update within this period of time,
-    it is considered to have failed and is removed from the cluster. See
+    state updates from lagging nodes. The default value is `90s`. If a node
+    does not successfully apply the cluster state update within this period of
+    time, it is considered to have failed and is removed from the cluster. See
     <<cluster-state-publishing>>.
 
-`cluster.initial_master_nodes`::
-
-    Sets a list of the <<node.name,node names>> or transport addresses of the
-    initial set of master-eligible nodes in a brand-new cluster. By default
-    this list is empty, meaning that this node expects to join a cluster that
-    has already been bootstrapped. See <<initial_master_nodes>>.
-
 `cluster.join.timeout`::
 
     Sets how long a node will wait after sending a request to join a cluster
@@ -165,8 +188,7 @@ Discovery and cluster formation are affected by the following settings:
 `cluster.publish.timeout`::
 
     Sets how long the master node waits for each cluster state update to be
-    completely published to all nodes. The default value is `30s`. If this
-    period of time elapses, the cluster state change is rejected. See
+    completely published to all nodes. The default value is `30s`. See
     <<cluster-state-publishing>>.
 
 [[no-master-block]]`cluster.no_master_block`::

From f8161ffa88b5784e3ca55898f31c280dcaab4186 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?= <cbuescher@posteo.de>
Date: Tue, 16 Apr 2019 10:03:35 +0200
Subject: [PATCH 034/112] Fix some `range` query edge cases (#41160)

Currently we throw an error when a range query's minimum value exceeds the
maximum value because the two are neighbouring values and both the upper and
lower bounds are excluded from the interval.

Since this is a condition that the user usually doesn't specify consciously (at
least for float and double values it is difficult to see which values are
adjacent), we should ignore those "wrong" intervals and create a
MatchNoDocsQuery in those cases.

We should still throw errors with an actionable message if the user specifies
the query interval such that the min value is greater than the max value. This
PR adds those checks and tests for those cases.
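
A minimal sketch of the resulting behaviour, using plain ints instead of the mapper's generic bounds (names here are illustrative, not the mapper's API):

```java
public class RangeBoundsSketch {
    // Returns null as a stand-in for MatchNoDocsQuery, otherwise a string describing the interval.
    static String intRangeQuery(int from, int to, boolean includeFrom, boolean includeTo) {
        if (from > to) {
            // Reversed bounds are a user error and deserve an actionable message.
            throw new IllegalArgumentException(
                "Range query `from` value (" + from + ") is greater than `to` value (" + to + ")");
        }
        int correctedFrom = includeFrom ? from : from + 1; // exclusive lower edge
        int correctedTo = includeTo ? to : to - 1;         // exclusive upper edge
        if (correctedFrom > correctedTo) {
            return null; // adjacent values with both edges excluded: match nothing
        }
        return "[" + correctedFrom + ", " + correctedTo + "]";
    }

    public static void main(String[] args) {
        System.out.println(intRangeQuery(5, 6, false, false)); // null: empty after excluding the edges
        System.out.println(intRangeQuery(5, 6, true, true));   // [5, 6]
        // intRangeQuery(6, 5, true, true) would throw IllegalArgumentException
    }
}
```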

Closes #40937
---
 .../index/mapper/RangeFieldMapper.java        | 126 +++++++++++------
 .../index/mapper/RangeFieldTypeTests.java     | 129 +++++++++++++++++-
 2 files changed, 209 insertions(+), 46 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
index e5ba55de7bf..1ee2b6f059a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
@@ -35,10 +35,12 @@ import org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FutureArrays;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
@@ -70,6 +72,7 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.function.BiFunction;
 
 import static org.elasticsearch.index.query.RangeQueryBuilder.GTE_FIELD;
 import static org.elasticsearch.index.query.RangeQueryBuilder.GT_FIELD;
@@ -516,25 +519,38 @@ public class RangeFieldMapper extends FieldMapper {
             }
 
             @Override
-            public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
-                InetAddress lower = (InetAddress)from;
-                InetAddress upper = (InetAddress)to;
-                return InetAddressRange.newWithinQuery(field,
-                    includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper));
+            public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
+                return createQuery(field, from, to, includeFrom, includeTo,
+                        (f, t) -> InetAddressRange.newWithinQuery(field, f, t));
             }
             @Override
-            public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
-                InetAddress lower = (InetAddress)from;
-                InetAddress upper = (InetAddress)to;
-                return InetAddressRange.newContainsQuery(field,
-                    includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper));
+            public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
+                return createQuery(field, from, to, includeFrom, includeTo,
+                        (f, t) -> InetAddressRange.newContainsQuery(field, f, t));
             }
             @Override
-            public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
-                InetAddress lower = (InetAddress)from;
-                InetAddress upper = (InetAddress)to;
-                return InetAddressRange.newIntersectsQuery(field,
-                    includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper));
+            public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
+                return createQuery(field, from, to, includeFrom, includeTo,
+                        (f, t) -> InetAddressRange.newIntersectsQuery(field, f, t));
+            }
+
+            private Query createQuery(String field, Object lower, Object upper, boolean includeLower, boolean includeUpper,
+                    BiFunction<InetAddress, InetAddress, Query> querySupplier) {
+                byte[] lowerBytes = InetAddressPoint.encode((InetAddress) lower);
+                byte[] upperBytes = InetAddressPoint.encode((InetAddress) upper);
+                if (FutureArrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) {
+                    throw new IllegalArgumentException(
+                            "Range query `from` value (" + lower + ") is greater than `to` value (" + upper + ")");
+                }
+                InetAddress correctedFrom = includeLower ? (InetAddress) lower : nextUp(lower);
+                InetAddress correctedTo = includeUpper ? (InetAddress) upper : nextDown(upper);
+                lowerBytes = InetAddressPoint.encode(correctedFrom);
+                upperBytes = InetAddressPoint.encode(correctedTo);
+                if (FutureArrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) {
+                    return new MatchNoDocsQuery("ip range didn't intersect anything");
+                } else {
+                    return querySupplier.apply(correctedFrom, correctedTo);
+                }
             }
         },
         DATE("date_range", NumberType.LONG) {
@@ -662,21 +678,18 @@ public class RangeFieldMapper extends FieldMapper {
             }
             @Override
             public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return FloatRange.newWithinQuery(field,
-                    new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
-                    new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
+                return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
+                        (f, t) -> FloatRange.newWithinQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
             }
             @Override
             public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return FloatRange.newContainsQuery(field,
-                    new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
-                    new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
+                return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
+                        (f, t) -> FloatRange.newContainsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
             }
             @Override
             public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return FloatRange.newIntersectsQuery(field,
-                    new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
-                    new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
+                return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
+                        (f, t) -> FloatRange.newIntersectsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
             }
         },
         DOUBLE("double_range", NumberType.DOUBLE) {
@@ -724,22 +737,20 @@ public class RangeFieldMapper extends FieldMapper {
             }
             @Override
             public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return DoubleRange.newWithinQuery(field,
-                    new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
-                    new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
+                return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
+                        (f, t) -> DoubleRange.newWithinQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
             }
             @Override
             public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return DoubleRange.newContainsQuery(field,
-                    new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
-                    new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
+                return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
+                        (f, t) -> DoubleRange.newContainsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
             }
             @Override
             public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return DoubleRange.newIntersectsQuery(field,
-                    new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
-                    new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
+                return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
+                        (f, t) -> DoubleRange.newIntersectsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
             }
+
         },
         // todo add BYTE support
         // todo add SHORT support
@@ -777,18 +788,18 @@ public class RangeFieldMapper extends FieldMapper {
             }
             @Override
             public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return IntRange.newWithinQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
-                    new int[] {(Integer)to - (includeTo ? 0 : 1)});
+                return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
+                        (f, t) -> IntRange.newWithinQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
             }
             @Override
             public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return IntRange.newContainsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
-                    new int[] {(Integer)to - (includeTo ? 0 : 1)});
+                return createQuery(field,  (Integer) from,  (Integer) to, includeFrom, includeTo,
+                        (f, t) -> IntRange.newContainsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
             }
             @Override
             public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return IntRange.newIntersectsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
-                    new int[] {(Integer)to - (includeTo ? 0 : 1)});
+                return createQuery(field,  (Integer) from,  (Integer) to, includeFrom, includeTo,
+                        (f, t) -> IntRange.newIntersectsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
             }
         },
         LONG("long_range", NumberType.LONG) {
@@ -837,18 +848,18 @@ public class RangeFieldMapper extends FieldMapper {
             }
             @Override
             public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return LongRange.newWithinQuery(field,  new long[] {(Long)from + (includeFrom ? 0 : 1)},
-                    new long[] {(Long)to - (includeTo ? 0 : 1)});
+                return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
+                        (f, t) -> LongRange.newWithinQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
             }
             @Override
             public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return LongRange.newContainsQuery(field,  new long[] {(Long)from + (includeFrom ? 0 : 1)},
-                    new long[] {(Long)to - (includeTo ? 0 : 1)});
+                return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
+                        (f, t) -> LongRange.newContainsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
             }
             @Override
             public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
-                return LongRange.newIntersectsQuery(field,  new long[] {(Long)from + (includeFrom ? 0 : 1)},
-                    new long[] {(Long)to - (includeTo ? 0 : 1)});
+                return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
+                        (f, t) -> LongRange.newIntersectsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
             }
         };
 
@@ -867,6 +878,31 @@ public class RangeFieldMapper extends FieldMapper {
             return name;
         }
 
+        /**
+         * Internal helper to create the actual {@link Query} using the provided supplier function. Before creating the query we check if
+         * the interval's min &gt; max, in which case an {@link IllegalArgumentException} is raised. The method adapts the interval bounds
+         * based on whether the edges should be included or excluded. In cases where, after this correction, the interval would be empty
+         * because min &gt; max, we simply return a {@link MatchNoDocsQuery}.
+         * This helper handles all {@link Number} cases and dates; the IP range type uses its own logic.
+         */
+        private static <T extends Comparable<T>> Query createQuery(String field, T from, T to, boolean includeFrom, boolean includeTo,
+                BiFunction<T, T, Query> querySupplier, RangeType rangeType) {
+            if (from.compareTo(to) > 0) {
+                // wrong argument order, this is an error the user should fix
+                throw new IllegalArgumentException("Range query `from` value (" + from + ") is greater than `to` value (" + to + ")");
+            }
+
+            @SuppressWarnings("unchecked")
+            T correctedFrom = includeFrom ? from : (T) rangeType.nextUp(from);
+            @SuppressWarnings("unchecked")
+            T correctedTo =  includeTo ? to : (T) rangeType.nextDown(to);
+            if (correctedFrom.compareTo(correctedTo) > 0) {
+                return new MatchNoDocsQuery("range didn't intersect anything");
+            } else {
+                return querySupplier.apply(correctedFrom, correctedTo);
+            }
+        }
+
         public abstract Field getRangeField(String name, Range range);
         public List<IndexableField> createFields(ParseContext context, String name, Range range, boolean indexed,
                                                  boolean docValued, boolean stored) {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java
index 6ca98fb4db6..a26999fa3a6 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java
@@ -28,6 +28,7 @@ import org.apache.lucene.document.LongRange;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.queries.BinaryDocValuesRangeQuery;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchParseException;
@@ -49,6 +50,7 @@ import java.net.InetAddress;
 import java.util.Locale;
 
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
 
 public class RangeFieldTypeTests extends FieldTypeTestCase {
     RangeType type;
@@ -92,11 +94,136 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
         boolean includeUpper = randomBoolean();
         Object from = nextFrom();
         Object to = nextTo(from);
+        if (includeLower == false && includeUpper == false) {
+            // need to increase once more, otherwise interval is empty because edge values are exclusive
+            to = nextTo(to);
+        }
 
         assertEquals(getExpectedRangeQuery(relation, from, to, includeLower, includeUpper),
             ft.rangeQuery(from, to, includeLower, includeUpper, relation, null, null, context));
     }
 
+    /**
+     * test the queries are correct if from/to are adjacent and the range is exclusive of those values
+     */
+    public void testRangeQueryIntersectsAdjacentValues() throws Exception {
+        QueryShardContext context = createContext();
+        ShapeRelation relation = randomFrom(ShapeRelation.values());
+        RangeFieldType ft = new RangeFieldType(type);
+        ft.setName(FIELDNAME);
+        ft.setIndexOptions(IndexOptions.DOCS);
+
+        Object from = null;
+        Object to = null;
+        switch (type) {
+            case LONG: {
+                long fromValue = randomLong();
+                from = fromValue;
+                to = fromValue + 1;
+                break;
+            }
+            case DATE: {
+                long fromValue = randomInt();
+                from = new DateTime(fromValue);
+                to = new DateTime(fromValue + 1);
+                break;
+            }
+            case INTEGER: {
+                int fromValue = randomInt();
+                from = fromValue;
+                to = fromValue + 1;
+                break;
+            }
+            case DOUBLE: {
+                double fromValue = randomDoubleBetween(0, 100, true);
+                from = fromValue;
+                to = Math.nextUp(fromValue);
+                break;
+            }
+            case FLOAT: {
+                float fromValue = randomFloat();
+                from = fromValue;
+                to = Math.nextUp(fromValue);
+                break;
+            }
+            case IP: {
+                byte[] ipv4 = new byte[4];
+                random().nextBytes(ipv4);
+                InetAddress fromValue = InetAddress.getByAddress(ipv4);
+                from = fromValue;
+                to = InetAddressPoint.nextUp(fromValue);
+                break;
+            }
+            default:
+                from = nextFrom();
+                to = nextTo(from);
+        }
+        Query rangeQuery = ft.rangeQuery(from, to, false, false, relation, null, null, context);
+        assertThat(rangeQuery, instanceOf(IndexOrDocValuesQuery.class));
+        assertThat(((IndexOrDocValuesQuery) rangeQuery).getIndexQuery(), instanceOf(MatchNoDocsQuery.class));
+    }
+    
+    /**
+     * check that we catch cases where the user specifies larger "from" than "to" value, not counting the include upper/lower settings
+     */
+    public void testFromLargerToErrors() throws Exception {
+        QueryShardContext context = createContext();
+        RangeFieldType ft = new RangeFieldType(type);
+        ft.setName(FIELDNAME);
+        ft.setIndexOptions(IndexOptions.DOCS);
+
+        final Object from;
+        final Object to;
+        switch (type) {
+            case LONG: {
+                long fromValue = randomLong();
+                from = fromValue;
+                to = fromValue - 1L;
+                break;
+            }
+            case DATE: {
+                long fromValue = randomInt();
+                from = new DateTime(fromValue);
+                to = new DateTime(fromValue - 1);
+                break;
+            }
+            case INTEGER: {
+                int fromValue = randomInt();
+                from = fromValue;
+                to = fromValue - 1;
+                break;
+            }
+            case DOUBLE: {
+                double fromValue = randomDoubleBetween(0, 100, true);
+                from = fromValue;
+                to = fromValue - 1.0d;
+                break;
+            }
+            case FLOAT: {
+                float fromValue = randomFloat();
+                from = fromValue;
+                to = fromValue - 1.0f;
+                break;
+            }
+            case IP: {
+                byte[] ipv4 = new byte[4];
+                random().nextBytes(ipv4);
+                InetAddress fromValue = InetAddress.getByAddress(ipv4);
+                from = fromValue;
+                to = InetAddressPoint.nextDown(fromValue);
+                break;
+            }
+            default:
+                // quit test for other range types
+                return;
+        }
+        ShapeRelation relation = randomFrom(ShapeRelation.values());
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
+                () -> ft.rangeQuery(from, to, true, true, relation, null, null, context));
+        assertTrue(ex.getMessage().contains("Range query `from` value"));
+        assertTrue(ex.getMessage().contains("is greater than `to` value"));
+    }
+
     private QueryShardContext createContext() {
         Settings indexSettings = Settings.builder()
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
@@ -104,7 +231,7 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
         return new QueryShardContext(0, idxSettings, null, null, null, null, null, xContentRegistry(),
             writableRegistry(), null, null, () -> nowInMillis, null);
     }
-
+    
     public void testDateRangeQueryUsingMappingFormat() {
         QueryShardContext context = createContext();
         RangeFieldType fieldType = new RangeFieldType(RangeType.DATE);

From 8ee84f22687d7d9aeff5631f1394a9b25c823f39 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen <nhat.nguyen@elastic.co>
Date: Tue, 16 Apr 2019 04:13:47 -0400
Subject: [PATCH 035/112] Correct flush parameters in engine test

Since #40213, we forbid a combination of flush parameters: force=true
and wait_if_ongoing=false.
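
A tiny sketch of the invariant, with an illustrative `validateFlushParams` helper (not the engine's actual code or message):

```java
public class FlushParamsSketch {
    static void validateFlushParams(boolean force, boolean waitIfOngoing) {
        // Forcing a flush while refusing to wait for one already in progress is contradictory.
        if (force && waitIfOngoing == false) {
            throw new IllegalArgumentException(
                "wait_if_ongoing must be true for a force flush: force=true wait_if_ongoing=false");
        }
    }

    public static void main(String[] args) {
        validateFlushParams(true, true);      // allowed: what the test now uses
        // validateFlushParams(true, false);  // would throw since #40213
    }
}
```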

Closes #41236
---
 .../org/elasticsearch/index/engine/InternalEngineTests.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 3636967e661..dba3fec9213 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -3104,7 +3104,7 @@ public class InternalEngineTests extends EngineTestCase {
                                 break;
                             }
                             case "flush": {
-                                engine.flush(true, false);
+                                engine.flush(true, true);
                                 break;
                             }
                         }

From 10e58210a021f4b849c812a06892f281aaa834cd Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Tue, 16 Apr 2019 12:49:47 +0100
Subject: [PATCH 036/112] Validate cluster UUID when joining Zen1 cluster
 (#41063)

Today we fail to join a Zen2 cluster if the cluster UUID does not match our
own, but we do not perform the same validation when joining a Zen1 cluster.
This means that a Zen2 node will pass join validation and be added to a Zen1
cluster but will reject all cluster states from the master.
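
Conceptually the new check looks like the following sketch (plain strings instead of `ClusterState`/`MetaData`, and `IllegalStateException` standing in for `CoordinationStateRejectedException`; the method name is illustrative):

```java
public class JoinUuidCheckSketch {
    // Rejects a join-validation request whose cluster state carries a different,
    // already-committed cluster UUID than the local node's.
    static void validateClusterUuid(boolean localUuidCommitted, String localUuid, String remoteUuid) {
        if (localUuidCommitted && localUuid.equals(remoteUuid) == false) {
            throw new IllegalStateException("join validation on cluster state with a different cluster uuid "
                + remoteUuid + " than local cluster uuid " + localUuid + ", rejecting");
        }
    }

    public static void main(String[] args) {
        validateClusterUuid(true, "uuid-A", "uuid-A");      // ok
        // validateClusterUuid(true, "uuid-A", "uuid-B");   // would be rejected
    }
}
```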

Relates #37775
---
 .../cluster/coordination/JoinHelper.java      |  8 +++
 .../cluster/coordination/JoinHelperTests.java | 52 +++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
index c25e8124fc4..b2206d4b426 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
@@ -153,6 +153,14 @@ public class JoinHelper {
         transportService.registerRequestHandler(MembershipAction.DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
             ValidateJoinRequest::new, ThreadPool.Names.GENERIC,
             (request, channel, task) -> {
+                final ClusterState localState = currentStateSupplier.get();
+                if (localState.metaData().clusterUUIDCommitted() &&
+                    localState.metaData().clusterUUID().equals(request.getState().metaData().clusterUUID()) == false) {
+                    throw new CoordinationStateRejectedException("mixed-version cluster join validation on cluster state" +
+                        " with a different cluster uuid " + request.getState().metaData().clusterUUID() +
+                        " than local cluster uuid " + localState.metaData().clusterUUID()
+                        + ", rejecting");
+                }
                 joinValidators.forEach(action -> action.accept(transportService.getLocalNode(), request.getState()));
                 channel.sendResponse(Empty.INSTANCE);
             });
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
index 877d2a5a487..d354c1d46b2 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
@@ -20,12 +20,19 @@ package org.elasticsearch.cluster.coordination;
 
 import org.apache.logging.log4j.Level;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListenerResponseHandler;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.NotMasterException;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.MembershipAction;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.transport.CapturingTransport;
 import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest;
+import org.elasticsearch.test.transport.MockTransport;
 import org.elasticsearch.transport.RemoteTransportException;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;
@@ -35,6 +42,7 @@ import java.util.Collections;
 import java.util.Optional;
 
 import static org.elasticsearch.node.Node.NODE_NAME_SETTING;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.Is.is;
 
@@ -131,4 +139,48 @@ public class JoinHelperTests extends ESTestCase {
                 new RemoteTransportException("caused by NotMasterException",
                         new NotMasterException("test"))), is(Level.DEBUG));
     }
+
+    public void testZen1JoinValidationRejectsMismatchedClusterUUID() {
+        assertJoinValidationRejectsMismatchedClusterUUID(MembershipAction.DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
+            "mixed-version cluster join validation on cluster state with a different cluster uuid");
+    }
+
+    public void testJoinValidationRejectsMismatchedClusterUUID() {
+        assertJoinValidationRejectsMismatchedClusterUUID(JoinHelper.VALIDATE_JOIN_ACTION_NAME,
+            "join validation on cluster state with a different cluster uuid");
+    }
+
+    private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, String expectedMessage) {
+        DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(
+            Settings.builder().put(NODE_NAME_SETTING.getKey(), "node0").build(), random());
+        MockTransport mockTransport = new MockTransport();
+        DiscoveryNode localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT);
+
+        final ClusterState localClusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder()
+            .generateClusterUuidIfNeeded().clusterUUIDCommitted(true)).build();
+
+        TransportService transportService = mockTransport.createTransportService(Settings.EMPTY,
+            deterministicTaskQueue.getThreadPool(), TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            x -> localNode, null, Collections.emptySet());
+        new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> localClusterState,
+            (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); },
+            Collections.emptyList()); // registers request handler
+        transportService.start();
+        transportService.acceptIncomingRequests();
+
+        final ClusterState otherClusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder()
+            .generateClusterUuidIfNeeded()).build();
+
+        final PlainActionFuture<TransportResponse.Empty> future = new PlainActionFuture<>();
+        transportService.sendRequest(localNode, actionName,
+            new ValidateJoinRequest(otherClusterState),
+            new ActionListenerResponseHandler<>(future, in -> TransportResponse.Empty.INSTANCE));
+        deterministicTaskQueue.runAllTasks();
+
+        final CoordinationStateRejectedException coordinationStateRejectedException
+            = expectThrows(CoordinationStateRejectedException.class, future::actionGet);
+        assertThat(coordinationStateRejectedException.getMessage(), containsString(expectedMessage));
+        assertThat(coordinationStateRejectedException.getMessage(), containsString(localClusterState.metaData().clusterUUID()));
+        assertThat(coordinationStateRejectedException.getMessage(), containsString(otherClusterState.metaData().clusterUUID()));
+    }
 }

From 8577bbd73bac68b771c4dcfe39c90e5f519795c1 Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Tue, 16 Apr 2019 13:36:29 +0100
Subject: [PATCH 037/112] Inline TransportReplAct#createReplicatedOperation
 (#41197)

`TransportReplicationAction.AsyncPrimaryAction#createReplicatedOperation`
exists so it can be overridden in tests. This commit re-works these tests to
use a real `ReplicationOperation` and inlines the now-unnecessary method.

Relates #40706.
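
As a rough illustration of the listener chaining this commit moves to, here is a self-contained sketch. None of the types below are
the real Elasticsearch classes; `ActionListener.wrap`-style composition is approximated with a tiny local interface, and the listener
names only mirror those introduced in the diff. Each listener delegates to the next so the shard reference is always released before
the caller is notified.

    // Self-contained sketch (not Elasticsearch code) of the listener-chaining idea.
    import java.util.function.Consumer;

    public class ListenerChainSketch {

        interface Listener<T> {
            void onResponse(T value);
            void onFailure(Exception e);
        }

        static <T> Listener<T> wrap(Consumer<T> onResponse, Consumer<Exception> onFailure) {
            return new Listener<T>() {
                @Override
                public void onResponse(T value) { onResponse.accept(value); }

                @Override
                public void onFailure(Exception e) { onFailure.accept(e); }
            };
        }

        public static void main(String[] args) {
            // Stands in for the primary shard operation permit that must be released first.
            Runnable releasePermit = () -> System.out.println("released shard reference");

            // What the caller ultimately sees.
            Listener<String> completionListener = wrap(
                response -> System.out.println("caller got: " + response),
                e -> System.out.println("caller got failure: " + e.getMessage()));

            // Inner listener: always release the shard reference before notifying the caller.
            Listener<String> referenceClosingListener = wrap(
                response -> { releasePermit.run(); completionListener.onResponse(response); },
                e -> { releasePermit.run(); completionListener.onFailure(e); });

            // Outer listener: best-effort post-operation work, then delegate to the closing listener.
            Listener<String> globalCheckpointSyncingListener = wrap(
                response -> {
                    System.out.println("post-operation global checkpoint sync");
                    referenceClosingListener.onResponse(response);
                },
                referenceClosingListener::onFailure);

            // In the real code a ReplicationOperation would eventually invoke this.
            globalCheckpointSyncingListener.onResponse("primary result");
        }
    }

The ordering is the point of the chain: a failure in the best-effort sync path still flows through the closing listener, so the shard
reference is released on every path.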
---
 .../replication/ReplicationOperation.java     |   2 +-
 .../TransportReplicationAction.java           |  62 +++----
 .../TransportReplicationActionTests.java      | 161 +++++++-----------
 3 files changed, 93 insertions(+), 132 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index 8f5d2a2ca53..a362502bd36 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
@@ -74,7 +74,7 @@ public class ReplicationOperation<
     private final long primaryTerm;
 
     // exposed for tests
-    final ActionListener<PrimaryResultT> resultListener;
+    private final ActionListener<PrimaryResultT> resultListener;
 
     private volatile PrimaryResultT primaryResult = null;
 
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 82dd77d6b58..92687d4880e 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -358,37 +358,35 @@ public abstract class TransportReplicationAction<
                         });
                 } else {
                     setPhase(replicationTask, "primary");
-                    createReplicatedOperation(primaryRequest.getRequest(),
-                        ActionListener.wrap(result -> result.respond(
-                            new ActionListener<Response>() {
-                                @Override
-                                public void onResponse(Response response) {
-                                    if (syncGlobalCheckpointAfterOperation) {
-                                        final IndexShard shard = primaryShardReference.indexShard;
-                                        try {
-                                            shard.maybeSyncGlobalCheckpoint("post-operation");
-                                        } catch (final Exception e) {
-                                            // only log non-closed exceptions
-                                            if (ExceptionsHelper.unwrap(
-                                                e, AlreadyClosedException.class, IndexShardClosedException.class) == null) {
-                                                // intentionally swallow, a missed global checkpoint sync should not fail this operation
-                                                logger.info(
-                                                    new ParameterizedMessage(
-                                                        "{} failed to execute post-operation global checkpoint sync", shard.shardId()), e);
-                                            }
-                                        }
-                                    }
-                                    primaryShardReference.close(); // release shard operation lock before responding to caller
-                                    setPhase(replicationTask, "finished");
-                                    onCompletionListener.onResponse(response);
-                                }
 
-                                @Override
-                                public void onFailure(Exception e) {
-                                    handleException(primaryShardReference, e);
+                    final ActionListener<Response> referenceClosingListener = ActionListener.wrap(response -> {
+                        primaryShardReference.close(); // release shard operation lock before responding to caller
+                        setPhase(replicationTask, "finished");
+                        onCompletionListener.onResponse(response);
+                    }, e -> handleException(primaryShardReference, e));
+
+                    final ActionListener<Response> globalCheckpointSyncingListener = ActionListener.wrap(response -> {
+                        if (syncGlobalCheckpointAfterOperation) {
+                            final IndexShard shard = primaryShardReference.indexShard;
+                            try {
+                                shard.maybeSyncGlobalCheckpoint("post-operation");
+                            } catch (final Exception e) {
+                                // only log non-closed exceptions
+                                if (ExceptionsHelper.unwrap(
+                                    e, AlreadyClosedException.class, IndexShardClosedException.class) == null) {
+                                    // intentionally swallow, a missed global checkpoint sync should not fail this operation
+                                    logger.info(
+                                        new ParameterizedMessage(
+                                            "{} failed to execute post-operation global checkpoint sync", shard.shardId()), e);
                                 }
-                            }), e -> handleException(primaryShardReference, e)
-                        ), primaryShardReference).execute();
+                            }
+                        }
+                        referenceClosingListener.onResponse(response);
+                    }, referenceClosingListener::onFailure);
+
+                    new ReplicationOperation<>(primaryRequest.getRequest(), primaryShardReference,
+                        ActionListener.wrap(result -> result.respond(globalCheckpointSyncingListener), referenceClosingListener::onFailure),
+                        newReplicasProxy(), logger, actionName, primaryRequest.getPrimaryTerm()).execute();
                 }
             } catch (Exception e) {
                 handleException(primaryShardReference, e);
@@ -406,12 +404,6 @@ public abstract class TransportReplicationAction<
             onCompletionListener.onFailure(e);
         }
 
-        protected ReplicationOperation<Request, ReplicaRequest, PrimaryResult<ReplicaRequest, Response>> createReplicatedOperation(
-            Request request, ActionListener<PrimaryResult<ReplicaRequest, Response>> listener,
-            PrimaryShardReference primaryShardReference) {
-            return new ReplicationOperation<>(request, primaryShardReference, listener,
-                    newReplicasProxy(), logger, actionName, primaryRequest.getPrimaryTerm());
-        }
     }
 
     public static class PrimaryResult<ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index 2cdd3ad2fe4..ccb23a9111a 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -143,6 +143,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         if (requestOrWrappedRequest instanceof TransportReplicationAction.ConcreteShardRequest) {
             requestOrWrappedRequest = ((TransportReplicationAction.ConcreteShardRequest<?>)requestOrWrappedRequest).getRequest();
         }
+        //noinspection unchecked
         return (R) requestOrWrappedRequest;
     }
 
@@ -209,7 +210,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         setState(clusterService, ClusterState.builder(clusterService.state()).blocks(blocks).build());
     }
 
-    public void testBlocksInReroutePhase() throws Exception {
+    public void testBlocksInReroutePhase() {
         final ClusterBlock nonRetryableBlock =
             new ClusterBlock(1, "non retryable", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
         final ClusterBlock retryableBlock =
@@ -290,7 +291,6 @@ public class TransportReplicationActionTests extends ESTestCase {
 
             TestAction testActionWithNoBlocks = new TestAction(Settings.EMPTY, "internal:testActionWithNoBlocks", transportService,
                 clusterService, shardStateAction, threadPool);
-            listener = new PlainActionFuture<>();
             TestAction.ReroutePhase reroutePhase = testActionWithNoBlocks.new ReroutePhase(task, requestWithTimeout, listener);
             reroutePhase.run();
             assertListenerThrows("should fail with an IndexNotFoundException when no blocks", listener, IndexNotFoundException.class);
@@ -350,7 +350,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         assertEquals(0, count.get());
     }
 
-    public void testNotStartedPrimary() throws InterruptedException, ExecutionException {
+    public void testNotStartedPrimary() {
         final String index = "test";
         final ShardId shardId = new ShardId(index, "_na_", 0);
         // no replicas in oder to skip the replication part
@@ -399,7 +399,7 @@ public class TransportReplicationActionTests extends ESTestCase {
      * This test checks that replication request is not routed back from relocation target to relocation source in case of
      * stale index routing table on relocation target.
      */
-    public void testNoRerouteOnStaleClusterState() throws InterruptedException, ExecutionException {
+    public void testNoRerouteOnStaleClusterState() {
         final String index = "test";
         final ShardId shardId = new ShardId(index, "_na_", 0);
         ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
@@ -441,7 +441,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         assertIndexShardCounter(0);
     }
 
-    public void testUnknownIndexOrShardOnReroute() throws InterruptedException {
+    public void testUnknownIndexOrShardOnReroute() {
         final String index = "test";
         // no replicas in oder to skip the replication part
         setState(clusterService, state(index, true,
@@ -462,10 +462,9 @@ public class TransportReplicationActionTests extends ESTestCase {
         reroutePhase.run();
         assertListenerThrows("must throw shard not found exception", listener, ShardNotFoundException.class);
         assertFalse(request.isRetrySet.get()); //TODO I'd have expected this to be true but we fail too early?
-
     }
 
-    public void testClosedIndexOnReroute() throws InterruptedException {
+    public void testClosedIndexOnReroute() {
         final String index = "test";
         // no replicas in oder to skip the replication part
         ClusterStateChanges clusterStateChanges = new ClusterStateChanges(xContentRegistry(), threadPool);
@@ -488,7 +487,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         assertFalse(request.isRetrySet.get());
     }
 
-    public void testStalePrimaryShardOnReroute() throws InterruptedException {
+    public void testStalePrimaryShardOnReroute() {
         final String index = "test";
         final ShardId shardId = new ShardId(index, "_na_", 0);
         // no replicas in order to skip the replication part
@@ -596,23 +595,17 @@ public class TransportReplicationActionTests extends ESTestCase {
         }
         final TransportReplicationAction.ConcreteShardRequest<Request> primaryRequest
             = new TransportReplicationAction.ConcreteShardRequest<>(request, primaryShard.allocationId().getId(), primaryTerm);
-        action.new AsyncPrimaryAction(primaryRequest, listener, task) {
+
+        new TestAction(Settings.EMPTY, "internal:testAction2", transportService, clusterService, shardStateAction, threadPool) {
             @Override
-            protected ReplicationOperation<Request, Request, TransportReplicationAction.PrimaryResult<Request, TestResponse>>
-            createReplicatedOperation(
-                    Request request,
-                    ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
-                    TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference) {
-                return new NoopReplicationOperation(request, actionListener, primaryTerm) {
-                    @Override
-                    public void execute() throws Exception {
-                        assertPhase(task, "primary");
-                        assertFalse(executed.getAndSet(true));
-                        super.execute();
-                    }
-                };
+            protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary,
+                                                   ActionListener<PrimaryResult<Request, TestResponse>> listener) {
+                assertPhase(task, "primary");
+                assertFalse(executed.getAndSet(true));
+                super.shardOperationOnPrimary(shardRequest, primary, listener);
             }
-        }.run();
+        }.new AsyncPrimaryAction(primaryRequest, listener, task).run();
+
         if (executeOnPrimary) {
             assertTrue(executed.get());
             assertTrue(listener.isDone());
@@ -626,9 +619,12 @@ public class TransportReplicationActionTests extends ESTestCase {
                 transport.capturedRequestsByTargetNode().get(primaryShard.relocatingNodeId());
             assertThat(requests, notNullValue());
             assertThat(requests.size(), equalTo(1));
-            assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("internal:testAction[p]"));
-            assertThat("primary term not properly set on primary delegation",
-                ((TransportReplicationAction.ConcreteShardRequest<Request>)requests.get(0).request).getPrimaryTerm(), equalTo(primaryTerm));
+            assertThat("primary request was not delegated to relocation target",
+                requests.get(0).action, equalTo("internal:testAction2[p]"));
+            //noinspection unchecked
+            final TransportReplicationAction.ConcreteShardRequest<Request> concreteShardRequest
+                = (TransportReplicationAction.ConcreteShardRequest<Request>) requests.get(0).request;
+            assertThat("primary term not properly set on primary delegation", concreteShardRequest.getPrimaryTerm(), equalTo(primaryTerm));
             assertPhase(task, "primary_delegation");
             transport.handleResponse(requests.get(0).requestId, new TestResponse());
             assertTrue(listener.isDone());
@@ -638,7 +634,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         }
     }
 
-    public void testPrimaryPhaseExecutesDelegatedRequestOnRelocationTarget() throws Exception {
+    public void testPrimaryPhaseExecutesDelegatedRequestOnRelocationTarget() {
         final String index = "test";
         final ShardId shardId = new ShardId(index, "_na_", 0);
         ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
@@ -654,34 +650,24 @@ public class TransportReplicationActionTests extends ESTestCase {
         AtomicBoolean executed = new AtomicBoolean();
         final TransportReplicationAction.ConcreteShardRequest<Request> primaryRequest
             = new TransportReplicationAction.ConcreteShardRequest<>(request, primaryShard.allocationId().getRelocationId(), primaryTerm);
-        action.new AsyncPrimaryAction(primaryRequest, listener, task) {
-            @Override
-            protected ReplicationOperation<Request, Request, TransportReplicationAction.PrimaryResult<Request, TestResponse>>
-            createReplicatedOperation(
-                    Request request,
-                    ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
-                    TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference) {
-                return new NoopReplicationOperation(request, actionListener, primaryTerm) {
-                    @Override
-                    public void execute() throws Exception {
-                        assertPhase(task, "primary");
-                        assertFalse(executed.getAndSet(true));
-                        super.execute();
-                    }
-                };
-            }
 
+        new TestAction(Settings.EMPTY, "internal:testAction2", transportService, clusterService, shardStateAction, threadPool) {
             @Override
-            public void onFailure(Exception e) {
-                throw new RuntimeException(e);
+            protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary,
+                                                   ActionListener<PrimaryResult<Request, TestResponse>> listener) {
+                assertPhase(task, "primary");
+                assertFalse(executed.getAndSet(true));
+                super.shardOperationOnPrimary(shardRequest, primary, listener);
             }
-        }.run();
+        }.new AsyncPrimaryAction(primaryRequest, listener, task).run();
         assertThat(executed.get(), equalTo(true));
         assertPhase(task, "finished");
         assertFalse(request.isRetrySet.get());
+        assertTrue(listener.isDone());
+        listener.actionGet(); // throws no exception
     }
 
-    public void testPrimaryReference() throws Exception {
+    public void testPrimaryReference() {
         final IndexShard shard = mock(IndexShard.class);
 
         AtomicBoolean closed = new AtomicBoolean();
@@ -789,6 +775,7 @@ public class TransportReplicationActionTests extends ESTestCase {
                 inSyncIds,
                 shardRoutingTable.getAllAllocationIds()));
         doAnswer(invocation -> {
+            //noinspection unchecked
             ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(() -> {});
             return null;
         }).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject());
@@ -805,6 +792,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         action.handlePrimaryRequest(concreteShardRequest, createTransportChannel(listener), null);
         CapturingTransport.CapturedRequest[] requestsToReplicas = transport.capturedRequests();
         assertThat(requestsToReplicas, arrayWithSize(1));
+        //noinspection unchecked
         assertThat(((TransportReplicationAction.ConcreteShardRequest<Request>) requestsToReplicas[0].request).getPrimaryTerm(),
             equalTo(primaryTerm));
     }
@@ -821,47 +809,38 @@ public class TransportReplicationActionTests extends ESTestCase {
         Request request = new Request(shardId);
         PlainActionFuture<TestResponse> listener = new PlainActionFuture<>();
         ReplicationTask task = maybeTask();
-        int i = randomInt(3);
-        final boolean throwExceptionOnCreation = i == 1;
-        final boolean throwExceptionOnRun = i == 2;
-        final boolean respondWithError = i == 3;
+        int i = randomInt(2);
+        final boolean throwExceptionOnRun = i == 1;
+        final boolean respondWithError = i == 2;
         final TransportReplicationAction.ConcreteShardRequest<Request> primaryRequest
             = new TransportReplicationAction.ConcreteShardRequest<>(request, primaryShard.allocationId().getId(), primaryTerm);
-        action.new AsyncPrimaryAction(primaryRequest, listener, task) {
+
+        new TestAction(Settings.EMPTY, "internal:testAction2", transportService, clusterService, shardStateAction, threadPool) {
             @Override
-            protected ReplicationOperation<Request, Request, TransportReplicationAction.PrimaryResult<Request, TestResponse>>
-            createReplicatedOperation(
-                    Request request,
-                    ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
-                    TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference) {
+            protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary,
+                                                   ActionListener<PrimaryResult<Request, TestResponse>> listener) {
                 assertIndexShardCounter(1);
-                if (throwExceptionOnCreation) {
-                    throw new ElasticsearchException("simulated exception, during createReplicatedOperation");
+                if (throwExceptionOnRun) {
+                    throw new ElasticsearchException("simulated exception, during shardOperationOnPrimary");
+                } else if (respondWithError) {
+                    listener.onFailure(new ElasticsearchException("simulated exception, as a response"));
+                } else {
+                    super.shardOperationOnPrimary(request, primary, listener);
                 }
-                return new NoopReplicationOperation(request, actionListener, primaryTerm) {
-                    @Override
-                    public void execute() throws Exception {
-                        assertIndexShardCounter(1);
-                        assertPhase(task, "primary");
-                        if (throwExceptionOnRun) {
-                            throw new ElasticsearchException("simulated exception, during performOnPrimary");
-                        } else if (respondWithError) {
-                            this.resultListener.onFailure(new ElasticsearchException("simulated exception, as a response"));
-                        } else {
-                            super.execute();
-                        }
-                    }
-                };
             }
-        }.run();
+        }.new AsyncPrimaryAction(primaryRequest, listener, task).run();
+
         assertIndexShardCounter(0);
         assertTrue(listener.isDone());
         assertPhase(task, "finished");
 
         try {
             listener.get();
+            if (throwExceptionOnRun || respondWithError) {
+                fail("expected exception, but none was thrown");
+            }
         } catch (ExecutionException e) {
-            if (throwExceptionOnCreation || throwExceptionOnRun || respondWithError) {
+            if (throwExceptionOnRun || respondWithError) {
                 Throwable cause = e.getCause();
                 assertThat(cause, instanceOf(ElasticsearchException.class));
                 assertThat(cause.getMessage(), containsString("simulated"));
@@ -871,7 +850,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         }
     }
 
-    public void testReplicasCounter() throws Exception {
+    public void testReplicasCounter() {
         final ShardId shardId = new ShardId("test", "_na_", 0);
         final ClusterState state = state(shardId.getIndexName(), true, ShardRoutingState.STARTED, ShardRoutingState.STARTED);
         setState(clusterService, state);
@@ -909,7 +888,7 @@ public class TransportReplicationActionTests extends ESTestCase {
      * This test ensures that replication operations adhere to the {@link IndexMetaData#SETTING_WAIT_FOR_ACTIVE_SHARDS} setting
      * when the request is using the default value for waitForActiveShards.
      */
-    public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception {
+    public void testDefaultWaitForActiveShardsUsesIndexSetting() {
         final String indexName = "test";
         final ShardId shardId = new ShardId(indexName, "_na_", 0);
 
@@ -1167,9 +1146,9 @@ public class TransportReplicationActionTests extends ESTestCase {
     }
 
     public static class Request extends ReplicationRequest<Request> {
-        public AtomicBoolean processedOnPrimary = new AtomicBoolean();
-        public AtomicInteger processedOnReplicas = new AtomicInteger();
-        public AtomicBoolean isRetrySet = new AtomicBoolean(false);
+        AtomicBoolean processedOnPrimary = new AtomicBoolean();
+        AtomicInteger processedOnReplicas = new AtomicInteger();
+        AtomicBoolean isRetrySet = new AtomicBoolean(false);
 
         Request(StreamInput in) throws IOException {
             super(in);
@@ -1284,6 +1263,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         return indexService;
     }
 
+    @SuppressWarnings("unchecked")
     private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) {
         final IndexShard indexShard = mock(IndexShard.class);
         when(indexShard.shardId()).thenReturn(shardId);
@@ -1319,23 +1299,12 @@ public class TransportReplicationActionTests extends ESTestCase {
         doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class));
         when(indexShard.getPendingPrimaryTerm()).thenAnswer(i ->
             clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
+
+        ReplicationGroup replicationGroup = mock(ReplicationGroup.class);
+        when(indexShard.getReplicationGroup()).thenReturn(replicationGroup);
         return indexShard;
     }
 
-    class NoopReplicationOperation extends ReplicationOperation<Request, Request, TestAction.PrimaryResult<Request, TestResponse>> {
-
-        NoopReplicationOperation(Request request, ActionListener<TestAction.PrimaryResult<Request, TestResponse>> listener,
-                                 long primaryTerm) {
-            super(request, null, listener, null, TransportReplicationActionTests.this.logger, "noop", primaryTerm);
-        }
-
-        @Override
-        public void execute() throws Exception {
-            // Using the diamond operator (<>) prevents Eclipse from being able to compile this code
-            this.resultListener.onResponse(new TransportReplicationAction.PrimaryResult<Request, TestResponse>(null, new TestResponse()));
-        }
-    }
-
     /**
      * Transport channel that is needed for replica operation testing.
      */
@@ -1348,12 +1317,12 @@ public class TransportReplicationActionTests extends ESTestCase {
             }
 
             @Override
-            public void sendResponse(TransportResponse response) throws IOException {
+            public void sendResponse(TransportResponse response) {
                 listener.onResponse(((TestResponse) response));
             }
 
             @Override
-            public void sendResponse(Exception exception) throws IOException {
+            public void sendResponse(Exception exception) {
                 listener.onFailure(exception);
             }
 

From 116167df5536b87e42273ba4a722c58c44504892 Mon Sep 17 00:00:00 2001
From: David Kyle <david.kyle@elastic.co>
Date: Tue, 16 Apr 2019 13:43:00 +0100
Subject: [PATCH 038/112] [ML] Write header to autodetect before it is visible
 to other calls (#41085)
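
The crux of the change is an ordering contract: the header must be written to the autodetect process exactly once, while the
communicator is still being set up and before it becomes visible to other calls. A rough, self-contained sketch of that contract
(the class below is illustrative only, not the real `AutodetectCommunicator`):

    // Illustrative only: data may only be written after the one-time header write.
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    public class HeaderBeforeDataSketch {

        static class Communicator {
            private final ByteArrayOutputStream processInput = new ByteArrayOutputStream();
            private boolean headerWritten;

            void writeHeader() throws IOException {
                processInput.write("time,value\n".getBytes(StandardCharsets.UTF_8));
                headerWritten = true;
            }

            void writeToJob(String csvRow) throws IOException {
                if (headerWritten == false) {
                    throw new IllegalStateException("writeHeader() must be called before writeToJob()");
                }
                processInput.write(csvRow.getBytes(StandardCharsets.UTF_8));
            }

            String contents() {
                return new String(processInput.toByteArray(), StandardCharsets.UTF_8);
            }
        }

        public static void main(String[] args) throws IOException {
            Communicator communicator = new Communicator();
            communicator.writeHeader();               // done before the communicator is "visible"
            communicator.writeToJob("1556000000,42\n");
            System.out.print(communicator.contents());
        }
    }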

---
 .../process/autodetect/AutodetectCommunicator.java | 14 ++++++++++++--
 .../autodetect/AutodetectProcessManager.java       |  7 ++++---
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
index b3f765d89ce..7e778e48524 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
@@ -90,9 +90,8 @@ public class AutodetectCommunicator implements Closeable {
                 && job.getAnalysisConfig().getCategorizationFieldName() != null;
     }
 
-    public void init(ModelSnapshot modelSnapshot) throws IOException {
+    public void restoreState(ModelSnapshot modelSnapshot) {
         autodetectProcess.restoreState(stateStreamer, modelSnapshot);
-        createProcessWriter(Optional.empty()).writeHeader();
     }
 
     private DataToProcessWriter createProcessWriter(Optional<DataDescription> dataDescription) {
@@ -101,6 +100,17 @@ public class AutodetectCommunicator implements Closeable {
                 dataCountsReporter, xContentRegistry);
     }
 
+    /**
+     * This must be called once before {@link #writeToJob(InputStream, AnalysisRegistry, XContentType, DataLoadParams, BiConsumer)}
+     * can be used
+     */
+    public void writeHeader() throws IOException {
+        createProcessWriter(Optional.empty()).writeHeader();
+    }
+
+    /**
+     * Call {@link #writeHeader()} exactly once before using this method
+     */
     public void writeToJob(InputStream inputStream, AnalysisRegistry analysisRegistry, XContentType xContentType,
                            DataLoadParams params, BiConsumer<DataCounts, Exception> handler) {
         submitOperation(() -> {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java
index eb387bfa5a2..1e35530fe17 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java
@@ -458,7 +458,7 @@ public class AutodetectProcessManager implements ClusterStateListener {
 
                                     try {
                                         createProcessAndSetRunning(processContext, job, params, closeHandler);
-                                        processContext.getAutodetectCommunicator().init(params.modelSnapshot());
+                                        processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot());
                                         setJobState(jobTask, JobState.OPENED);
                                     } catch (Exception e1) {
                                         // No need to log here as the persistent task framework will log it
@@ -499,7 +499,7 @@ public class AutodetectProcessManager implements ClusterStateListener {
     private void createProcessAndSetRunning(ProcessContext processContext,
                                             Job job,
                                             AutodetectParams params,
-                                            BiConsumer<Exception, Boolean> handler) {
+                                            BiConsumer<Exception, Boolean> handler) throws IOException {
         // At this point we lock the process context until the process has been started.
         // The reason behind this is to ensure closing the job does not happen before
         // the process is started as that can result to the job getting seemingly closed
@@ -507,6 +507,7 @@ public class AutodetectProcessManager implements ClusterStateListener {
         processContext.tryLock();
         try {
             AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler);
+            communicator.writeHeader();
             processContext.setRunning(communicator);
         } finally {
             // Now that the process is running and we have updated its state we can unlock.
@@ -639,7 +640,7 @@ public class AutodetectProcessManager implements ClusterStateListener {
         processContext.tryLock();
         try {
             if (processContext.setDying() == false) {
-                logger.debug("Cannot close job [{}] as it has already been closed", jobId);
+                logger.debug("Cannot close job [{}] as it has been marked as dying", jobId);
                 return;
             }
 

From c22a2cea12852a95d8fe570d90487344e1149083 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi <jim.ferenczi@elastic.co>
Date: Tue, 16 Apr 2019 13:02:10 +0200
Subject: [PATCH 039/112] BlendedTermQuery should ignore fields that don't
 exist in the index (#41125)

Today the blended term query detects whether a term exists in a field by looking at the term statistics in the index.
However, the value indicating that a term has no occurrence in a field has changed in Lucene. A non-existing term now returns
a doc and total term frequency of 0. Because of this discrepancy the blended term query picks 0 as the minimum frequency for a term
even if other fields have documents for this term. This confuses the term queries that the blending creates since some of them
contain a custom state that indicates a frequency of 0 even though the term has some occurrence in the field. For these terms an exception
is thrown because the term query always checks that the term state's frequency is greater than 0 if there are documents associated with it.
This change fixes the bug by ignoring terms with a doc freq of 0 when the blended term query picks the minimum term frequency among the
requested fields.

Closes #41118
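
As a rough, self-contained sketch of the core of the fix (illustrative names and data, not the actual Lucene/Elasticsearch code):
when computing the minimum sum of total term frequencies across the requested fields, fields in which the term does not occur at all
are skipped rather than allowed to drag the minimum down to 0.

    // Illustrative sketch: pick the minimum per-field sumTotalTermFreq, ignoring fields
    // where the term has a frequency of 0 (i.e. the term or field does not exist there).
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MinSumTtfSketch {

        static long minSumTtfIgnoringMissingFields(Map<String, long[]> perField) {
            long minSumTtf = Long.MAX_VALUE;
            for (Map.Entry<String, long[]> entry : perField.entrySet()) {
                long termFreqInField = entry.getValue()[0];   // totalTermFreq of the term in this field
                long sumTtfOfField = entry.getValue()[1];     // sumTotalTermFreq of the whole field
                if (termFreqInField > 0) {                    // skip fields where the term is absent
                    minSumTtf = Math.min(minSumTtf, sumTtfOfField);
                }
            }
            return minSumTtf == Long.MAX_VALUE ? 0 : minSumTtf;
        }

        public static void main(String[] args) {
            Map<String, long[]> stats = new LinkedHashMap<>();
            stats.put("username", new long[] {3, 120});       // term occurs 3 times, field has 120 tokens
            stats.put("song", new long[] {1, 250});
            stats.put("unknown_field", new long[] {0, 0});    // missing field: previously forced the minimum to 0
            System.out.println(minSumTtfIgnoringMissingFields(stats)); // prints 120
        }
    }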
---
 .../lucene/queries/BlendedTermQuery.java      | 30 ++--------
 .../lucene/queries/BlendedTermQueryTests.java | 59 +++++++++++++++++++
 2 files changed, 64 insertions(+), 25 deletions(-)

diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
index dd3ac992475..1700979c32d 100644
--- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
+++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
@@ -113,23 +113,17 @@ public abstract class BlendedTermQuery extends Query {
             // TODO: Maybe it could also make sense to assume independent distributions of documents and eg. have:
             //   df = df1 + df2 - (df1 * df2 / maxDoc)?
             max = Math.max(df, max);
-            if (minSumTTF != -1 && ctx.totalTermFreq() != -1) {
+            if (ctx.totalTermFreq() > 0) {
                 // we need to find out the minimum sumTTF to adjust the statistics
                 // otherwise the statistics don't match
                 minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field()));
-            } else {
-                minSumTTF = -1;
             }
 
         }
-        if (minSumTTF != -1 && maxDoc > minSumTTF) {
-            maxDoc = (int)minSumTTF;
-        }
-
         if (max == 0) {
             return; // we are done that term doesn't exist at all
         }
-        long sumTTF = minSumTTF == -1 ? -1 : 0;
+        long sumTTF = 0;
         final int[] tieBreak = new int[contexts.length];
         for (int i = 0; i < tieBreak.length; ++i) {
             tieBreak[i] = i;
@@ -165,11 +159,7 @@ public abstract class BlendedTermQuery extends Query {
             }
             contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf));
             prev = current;
-            if (sumTTF >= 0 && ctx.totalTermFreq() >= 0) {
-                sumTTF += ctx.totalTermFreq();
-            } else {
-                sumTTF = -1;  // omit once TF is omitted anywhere!
-            }
+            sumTTF += ctx.totalTermFreq();
         }
         sumTTF = Math.min(sumTTF, minSumTTF);
         for (int i = 0; i < contexts.length; i++) {
@@ -177,17 +167,12 @@ public abstract class BlendedTermQuery extends Query {
             if (df == 0) {
                 continue;
             }
-            // the blended sumTTF can't be greater than the sumTTTF on the field
-            final long fixedTTF = sumTTF == -1 ? -1 : sumTTF;
-            contexts[i] = adjustTTF(reader.getContext(), contexts[i], fixedTTF);
+            contexts[i] = adjustTTF(reader.getContext(), contexts[i], sumTTF);
         }
     }
 
     private TermStates adjustTTF(IndexReaderContext readerContext, TermStates termContext, long sumTTF) throws IOException {
         assert termContext.wasBuiltFor(readerContext);
-        if (sumTTF == -1 && termContext.totalTermFreq() == -1) {
-            return termContext;
-        }
         TermStates newTermContext = new TermStates(readerContext);
         List<LeafReaderContext> leaves = readerContext.leaves();
         final int len;
@@ -213,12 +198,7 @@ public abstract class BlendedTermQuery extends Query {
     private static TermStates adjustDF(IndexReaderContext readerContext, TermStates ctx, int newDocFreq) throws IOException {
         assert ctx.wasBuiltFor(readerContext);
         // Use a value of ttf that is consistent with the doc freq (ie. gte)
-        long newTTF;
-        if (ctx.totalTermFreq() < 0) {
-            newTTF = -1;
-        } else {
-            newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
-        }
+        long newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
         List<LeafReaderContext> leaves = readerContext.leaves();
         final int len;
         if (leaves == null) {
diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java
index 1ad067a7e2b..ce33c247a33 100644
--- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java
+++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java
@@ -28,10 +28,12 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermStates;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.ScoreMode;
@@ -52,6 +54,8 @@ import java.util.Set;
 
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
 
 public class BlendedTermQueryTests extends ESTestCase {
     public void testDismaxQuery() throws IOException {
@@ -114,6 +118,61 @@ public class BlendedTermQueryTests extends ESTestCase {
             assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue());
 
         }
+        {
+            // test with an unknown field
+            String[] fields = new String[] {"username", "song", "unknown_field"};
+            Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 1.0f);
+            Query rewrite = searcher.rewrite(query);
+            assertThat(rewrite, instanceOf(BooleanQuery.class));
+            for (BooleanClause clause : (BooleanQuery) rewrite) {
+                assertThat(clause.getQuery(), instanceOf(TermQuery.class));
+                TermQuery termQuery = (TermQuery) clause.getQuery();
+                TermStates termStates = termQuery.getTermStates();
+                if (termQuery.getTerm().field().equals("unknown_field")) {
+                    assertThat(termStates.docFreq(), equalTo(0));
+                    assertThat(termStates.totalTermFreq(), equalTo(0L));
+                } else {
+                    assertThat(termStates.docFreq(), greaterThan(0));
+                    assertThat(termStates.totalTermFreq(), greaterThan(0L));
+                }
+            }
+            assertThat(searcher.search(query, 10).totalHits.value, equalTo((long) iters + username.length));
+        }
+        {
+            // test with an unknown field and an unknown term
+            String[] fields = new String[] {"username", "song", "unknown_field"};
+            Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "unknown_term"), 1.0f);
+            Query rewrite = searcher.rewrite(query);
+            assertThat(rewrite, instanceOf(BooleanQuery.class));
+            for (BooleanClause clause : (BooleanQuery) rewrite) {
+                assertThat(clause.getQuery(), instanceOf(TermQuery.class));
+                TermQuery termQuery = (TermQuery) clause.getQuery();
+                TermStates termStates = termQuery.getTermStates();
+                assertThat(termStates.docFreq(), equalTo(0));
+                assertThat(termStates.totalTermFreq(), equalTo(0L));
+            }
+            assertThat(searcher.search(query, 10).totalHits.value, equalTo(0L));
+        }
+        {
+            // test with an unknown field and a term that is present in only one field
+            String[] fields = new String[] {"username", "song", "id", "unknown_field"};
+            Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "fan"), 1.0f);
+            Query rewrite = searcher.rewrite(query);
+            assertThat(rewrite, instanceOf(BooleanQuery.class));
+            for (BooleanClause clause : (BooleanQuery) rewrite) {
+                assertThat(clause.getQuery(), instanceOf(TermQuery.class));
+                TermQuery termQuery = (TermQuery) clause.getQuery();
+                TermStates termStates = termQuery.getTermStates();
+                if (termQuery.getTerm().field().equals("username")) {
+                    assertThat(termStates.docFreq(), equalTo(1));
+                    assertThat(termStates.totalTermFreq(), equalTo(1L));
+                } else {
+                    assertThat(termStates.docFreq(), equalTo(0));
+                    assertThat(termStates.totalTermFreq(), equalTo(0L));
+                }
+            }
+            assertThat(searcher.search(query, 10).totalHits.value, equalTo(1L));
+        }
         reader.close();
         w.close();
         dir.close();

From c4e84e2b34b924fcda972615ec707fdd9393f2d3 Mon Sep 17 00:00:00 2001
From: Armin Braun <me@obrown.io>
Date: Tue, 16 Apr 2019 17:19:05 +0200
Subject: [PATCH 040/112] Add Bulk Delete Api to BlobStore (#40322) (#41253)

* Adds a bulk delete API to the blob container
* Implements the bulk delete API for S3, batching keys as sketched below
* Adjusts the S3Fixture to accept both path styles for bulk deletes since the S3 SDK uses both during our ITs
* Closes #40250
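
A rough, self-contained sketch of the batching idea behind the S3 implementation (illustrative class, not the real `S3BlobContainer`;
the actual code wraps each batch in a `DeleteObjectsRequest`): the S3 multi-object delete API accepts at most 1000 keys per request,
so the blob keys are split into chunks of up to 1000.

    // Illustrative sketch of splitting keys into batches of at most 1000 for bulk deletion.
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class BulkDeleteBatchingSketch {

        private static final int MAX_BULK_DELETES = 1000;

        static List<List<String>> partition(List<String> keys) {
            List<List<String>> batches = new ArrayList<>();
            List<String> current = new ArrayList<>();
            for (String key : keys) {
                current.add(key);
                if (current.size() == MAX_BULK_DELETES) {
                    batches.add(current);
                    current = new ArrayList<>();
                }
            }
            if (current.isEmpty() == false) {
                batches.add(current);
            }
            return batches;
        }

        public static void main(String[] args) {
            List<String> keys = new ArrayList<>();
            for (int i = 0; i < 2500; i++) {
                keys.add("indices/blob-" + i);
            }
            List<List<String>> batches = partition(keys);
            System.out.println(batches.size());                       // 3
            System.out.println(Arrays.asList(batches.get(0).size(),
                batches.get(1).size(), batches.get(2).size()));        // [1000, 1000, 500]
        }
    }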
---
 .../repositories/s3/S3BlobContainer.java      | 52 +++++++++++++
 .../repositories/s3/AmazonS3Fixture.java      |  7 +-
 .../repositories/s3/MockAmazonS3.java         |  6 +-
 .../common/blobstore/BlobContainer.java       | 31 +++++++-
 .../blobstore/BlobStoreRepository.java        | 73 ++++++++-----------
 .../ESBlobStoreContainerTestCase.java         | 18 +++++
 6 files changed, 136 insertions(+), 51 deletions(-)

diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
index fc3f80b5b32..f98382e5526 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
@@ -23,6 +23,7 @@ import com.amazonaws.AmazonClientException;
 import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
 import com.amazonaws.services.s3.model.AmazonS3Exception;
 import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
 import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
 import com.amazonaws.services.s3.model.ObjectListing;
 import com.amazonaws.services.s3.model.ObjectMetadata;
@@ -56,6 +57,12 @@ import static org.elasticsearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING
 
 class S3BlobContainer extends AbstractBlobContainer {
 
+    /**
+     * Maximum number of deletes in a {@link DeleteObjectsRequest}.
+     * @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html">S3 Documentation</a>.
+     */
+    private static final int MAX_BULK_DELETES = 1000;
+
     private final S3BlobStore blobStore;
     private final String keyPath;
 
@@ -118,6 +125,51 @@ class S3BlobContainer extends AbstractBlobContainer {
         deleteBlobIgnoringIfNotExists(blobName);
     }
 
+    @Override
+    public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
+        if (blobNames.isEmpty()) {
+            return;
+        }
+        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
+            // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes
+            final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
+            final List<String> partition = new ArrayList<>();
+            for (String blob : blobNames) {
+                partition.add(buildKey(blob));
+                if (partition.size() == MAX_BULK_DELETES ) {
+                    deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
+                    partition.clear();
+                }
+            }
+            if (partition.isEmpty() == false) {
+                deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
+            }
+            SocketAccess.doPrivilegedVoid(() -> {
+                AmazonClientException aex = null;
+                for (DeleteObjectsRequest deleteRequest : deleteRequests) {
+                    try {
+                        clientReference.client().deleteObjects(deleteRequest);
+                    } catch (AmazonClientException e) {
+                        if (aex == null) {
+                            aex = e;
+                        } else {
+                            aex.addSuppressed(e);
+                        }
+                    }
+                }
+                if (aex != null) {
+                    throw aex;
+                }
+            });
+        } catch (final AmazonClientException e) {
+            throw new IOException("Exception when deleting blobs [" + blobNames + "]", e);
+        }
+    }
+
+    private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) {
+        return new DeleteObjectsRequest(bucket).withKeys(blobs.toArray(Strings.EMPTY_ARRAY)).withQuiet(true);
+    }
+
     @Override
     public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException {
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
index a411a1c53cf..51b1d5159ed 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
@@ -324,7 +324,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
         // Delete Multiple Objects
         //
         // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
-        handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), (request) -> {
+        final RequestHandler bulkDeleteHandler = request -> {
             final List<String> deletes = new ArrayList<>();
             final List<String> errors = new ArrayList<>();
 
@@ -344,7 +344,6 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
                             if (closingOffset != -1) {
                                 offset = offset + startMarker.length();
                                 final String objectName = requestBody.substring(offset, closingOffset);
-
                                 boolean found = false;
                                 for (Bucket bucket : buckets.values()) {
                                     if (bucket.objects.containsKey(objectName)) {
@@ -369,7 +368,9 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
                 }
             }
             return newInternalError(request.getId(), "Something is wrong with this POST multiple deletes request");
-        });
+        };
+        handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), bulkDeleteHandler);
+        handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/{bucket}"), bulkDeleteHandler);
 
         // non-authorized requests
 
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java
index 9e0a6009659..37f5d9b03db 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java
@@ -158,11 +158,7 @@ class MockAmazonS3 extends AbstractAmazonS3 {
 
         final List<DeleteObjectsResult.DeletedObject> deletions = new ArrayList<>();
         for (DeleteObjectsRequest.KeyVersion key : request.getKeys()) {
-            if (blobs.remove(key.getKey()) == null) {
-                AmazonS3Exception exception = new AmazonS3Exception("[" + key + "] does not exist.");
-                exception.setStatusCode(404);
-                throw exception;
-            } else {
+            if (blobs.remove(key.getKey()) != null) {
                 DeleteObjectsResult.DeletedObject deletion = new DeleteObjectsResult.DeletedObject();
                 deletion.setKey(key.getKey());
                 deletions.add(deletion);
diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
index ab3971c3283..19d3a66a87d 100644
--- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
+++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -96,8 +97,9 @@ public interface BlobContainer {
      * @throws  IOException if the input stream could not be read, or the target blob could not be written to.
      */
     void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException;
+
     /**
-     * Deletes a blob with giving name, if the blob exists. If the blob does not exist,
+     * Deletes the blob with the given name, if the blob exists. If the blob does not exist,
      * this method throws a NoSuchFileException.
      *
      * @param   blobName
@@ -107,6 +109,33 @@ public interface BlobContainer {
      */
     void deleteBlob(String blobName) throws IOException;
 
+    /**
+     * Deletes the blobs with the given names. Unlike {@link #deleteBlob(String)}, this method does not throw an exception
+     * when one or more of the given blobs don't exist; such blobs are simply ignored.
+     *
+     * @param   blobNames  The names of the blobs to delete.
+     * @throws  IOException if a subset of the blobs exists but could not be deleted.
+     */
+    default void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
+        IOException ioe = null;
+        for (String blobName : blobNames) {
+            try {
+                deleteBlob(blobName);
+            } catch (NoSuchFileException e) {
+                // ignored
+            } catch (IOException e) {
+                if (ioe == null) {
+                    ioe = e;
+                } else {
+                    ioe.addSuppressed(e);
+                }
+            }
+        }
+        if (ioe != null) {
+            throw ioe;
+        }
+    }
+
     /**
      * Deletes a blob with giving name, ignoring if the blob does not exist.
      *
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index c6eb7e5c6c2..04af29438d4 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -100,7 +100,6 @@ import org.elasticsearch.threadpool.ThreadPool;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.nio.file.DirectoryNotEmptyException;
 import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
@@ -464,22 +463,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             final Collection<IndexId> indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values());
             indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values());
             final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
-            for (final IndexId indexId : indicesToCleanUp) {
                 try {
-                    indicesBlobContainer.deleteBlobIgnoringIfNotExists(indexId.getId());
-                } catch (DirectoryNotEmptyException dnee) {
-                    // if the directory isn't empty for some reason, it will fail to clean up;
-                    // we'll ignore that and accept that cleanup didn't fully succeed.
-                    // since we are using UUIDs for path names, this won't be an issue for
-                    // snapshotting indices of the same name
-                    logger.warn(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, " +
-                        "but failed to clean up its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
+                    indicesBlobContainer.deleteBlobsIgnoringIfNotExists(
+                        indicesToCleanUp.stream().map(IndexId::getId).collect(Collectors.toList()));
                 } catch (IOException ioe) {
                     // a different IOException occurred while trying to delete - will just log the issue for now
-                    logger.warn(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, " +
-                        "but failed to clean up its index folder.", metadata.name(), indexId), ioe);
+                    logger.warn(() ->
+                        new ParameterizedMessage(
+                            "[{}] indices {} are no longer part of any snapshots in the repository, " +
+                        "but failed to clean up their index folders.", metadata.name(), indicesToCleanUp), ioe);
                 }
-            }
         } catch (IOException | ResourceNotFoundException ex) {
             throw new RepositoryException(metadata.name(), "failed to delete snapshot [" + snapshotId + "]", ex);
         }
@@ -1016,16 +1009,14 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             try {
                 // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
                 // attempt to write an index file with this generation failed mid-way after creating the temporary file.
-                for (final String blobName : blobs.keySet()) {
-                    if (FsBlobContainer.isTempBlobName(blobName)) {
-                        try {
-                            blobContainer.deleteBlobIgnoringIfNotExists(blobName);
-                        } catch (IOException e) {
-                            logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blob [{}] during finalization",
-                                snapshotId, shardId, blobName), e);
-                            throw e;
-                        }
-                    }
+                final List<String> blobNames =
+                    blobs.keySet().stream().filter(FsBlobContainer::isTempBlobName).collect(Collectors.toList());
+                try {
+                    blobContainer.deleteBlobsIgnoringIfNotExists(blobNames);
+                } catch (IOException e) {
+                    logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blobs {} during finalization",
+                        snapshotId, shardId, blobNames), e);
+                    throw e;
                 }
 
                 // If we deleted all snapshots, we don't need to create a new index file
@@ -1034,28 +1025,26 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 }
 
                 // Delete old index files
-                for (final String blobName : blobs.keySet()) {
-                    if (blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
-                        try {
-                            blobContainer.deleteBlobIgnoringIfNotExists(blobName);
-                        } catch (IOException e) {
-                            logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blob [{}] during finalization",
-                                snapshotId, shardId, blobName), e);
-                            throw e;
-                        }
-                    }
+                final List<String> indexBlobs =
+                    blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList());
+                try {
+                    blobContainer.deleteBlobsIgnoringIfNotExists(indexBlobs);
+                } catch (IOException e) {
+                    logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blobs {} during finalization",
+                        snapshotId, shardId, indexBlobs), e);
+                    throw e;
                 }
 
                 // Delete all blobs that don't exist in a snapshot
-                for (final String blobName : blobs.keySet()) {
-                    if (blobName.startsWith(DATA_BLOB_PREFIX) && (updatedSnapshots.findNameFile(canonicalName(blobName)) == null)) {
-                        try {
-                            blobContainer.deleteBlobIgnoringIfNotExists(blobName);
-                        } catch (IOException e) {
-                            logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete data blob [{}] during finalization",
-                                snapshotId, shardId, blobName), e);
-                        }
-                    }
+                final List<String> orphanedBlobs = blobs.keySet().stream()
+                    .filter(blobName ->
+                        blobName.startsWith(DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blobName)) == null)
+                    .collect(Collectors.toList());
+                try {
+                    blobContainer.deleteBlobsIgnoringIfNotExists(orphanedBlobs);
+                } catch (IOException e) {
+                    logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete data blobs {} during finalization",
+                        snapshotId, shardId, orphanedBlobs), e);
                 }
             } catch (IOException e) {
                 String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]";
diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
index 3e4e639dd01..21071f7cb50 100644
--- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
@@ -33,6 +33,7 @@ import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
@@ -136,6 +137,23 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
         }
     }
 
+    public void testDeleteBlobs() throws IOException {
+        try (BlobStore store = newBlobStore()) {
+            final List<String> blobNames = Arrays.asList("foobar", "barfoo");
+            final BlobContainer container = store.blobContainer(new BlobPath());
+            container.deleteBlobsIgnoringIfNotExists(blobNames); // does not throw when the blobs don't exist
+            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+            final BytesArray bytesArray = new BytesArray(data);
+            for (String blobName : blobNames) {
+                writeBlob(container, blobName, bytesArray, randomBoolean());
+            }
+            assertEquals(2, container.listBlobs().size());
+            container.deleteBlobsIgnoringIfNotExists(blobNames);
+            assertTrue(container.listBlobs().isEmpty());
+            container.deleteBlobsIgnoringIfNotExists(blobNames); // does not throw when the blobs don't exist
+        }
+    }
+
     public void testDeleteBlobIgnoringIfNotExists() throws IOException {
         try (BlobStore store = newBlobStore()) {
             BlobPath blobPath = new BlobPath();
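
A minimal usage sketch of the bulk delete introduced above (the class name,
blob names and cleanup helper are illustrative, not part of the Elasticsearch
API; the ignore-missing and exception-suppression behavior is as documented on
deleteBlobsIgnoringIfNotExists):

    import org.elasticsearch.common.blobstore.BlobContainer;

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    public class BulkDeleteExample {
        // Delete a fixed set of blob names from the given container. Names that
        // do not exist are silently skipped; only a failure to delete an existing
        // blob surfaces as an IOException (further failures are added as suppressed).
        public static void cleanUp(BlobContainer container) throws IOException {
            List<String> staleBlobs = Arrays.asList("index-0", "index-1.tmp");
            container.deleteBlobsIgnoringIfNotExists(staleBlobs);
        }
    }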

From ec8709e831384298debc0f8a0c4e7afcf3d1c0a0 Mon Sep 17 00:00:00 2001
From: Gordon Brown <gordon.brown@elastic.co>
Date: Tue, 16 Apr 2019 09:25:51 -0600
Subject: [PATCH 041/112] Check allocation rules are cleared after ILM Shrink
 (#41170)

Adds some checks to make sure that the allocation rules that ILM adds
before a shrink are cleared after the shrink is complete
---
 .../xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java   | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java
index 69aca756554..b6b317e0c67 100644
--- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java
+++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java
@@ -59,6 +59,7 @@ import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
 
 public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
     private String index;
@@ -218,6 +219,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
             assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY));
             assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
             assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
+            assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
         });
         expectThrows(ResponseException.class, this::indexDocument);
     }
@@ -461,6 +463,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
             assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY));
             assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
             assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
+            assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
         });
         expectThrows(ResponseException.class, this::indexDocument);
     }
@@ -480,6 +483,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
             assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY));
             assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(numberOfShards)));
             assertNull(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()));
+            assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
         });
     }
 
@@ -523,6 +527,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
             assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY));
             assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(1)));
             assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
+            assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
         }, 2, TimeUnit.MINUTES);
         expectThrows(ResponseException.class, this::indexDocument);
         // assert that snapshot succeeded

From 6cc35d372450dc1d644c99ffd881b807f85e2e1e Mon Sep 17 00:00:00 2001
From: David Roberts <dave.roberts@elastic.co>
Date: Tue, 16 Apr 2019 09:16:23 +0100
Subject: [PATCH 042/112] [ML] Unmute MachineLearningIT.testDeleteExpiredData
 (#41186)

The cause of failure was fixed by elastic/ml-cpp#459,
so all that remains on the Java side is to unmute the
test that was failing.

Closes #41070
---
 .../test/java/org/elasticsearch/client/MachineLearningIT.java    | 1 -
 1 file changed, 1 deletion(-)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index 3fdc00419cd..f7b7b148f66 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -915,7 +915,6 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
         return forecastJobResponse.getForecastId();
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41070")
     public void testDeleteExpiredData() throws Exception {
 
         String jobId = "test-delete-expired-data";

From 4bff26ef69a1baf8d4be0198d1d47f219e6f9843 Mon Sep 17 00:00:00 2001
From: Lisa Cawley <lcawley@elastic.co>
Date: Tue, 16 Apr 2019 09:08:07 -0700
Subject: [PATCH 043/112] [DOCS] Deprecates transport settings (#41174)

---
 docs/reference/migration/migrate_7_1.asciidoc | 52 ++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/docs/reference/migration/migrate_7_1.asciidoc b/docs/reference/migration/migrate_7_1.asciidoc
index aedca0a6304..57b63ffb77e 100644
--- a/docs/reference/migration/migrate_7_1.asciidoc
+++ b/docs/reference/migration/migrate_7_1.asciidoc
@@ -16,4 +16,54 @@ coming[7.1.0]
 
 //tag::notable-breaking-changes[]
 
-// end::notable-breaking-changes[]
\ No newline at end of file
+// end::notable-breaking-changes[]
+
+[float]
+[[breaking_71_http_changes]]
+=== HTTP changes
+
+[float]
+==== Deprecation of old HTTP settings
+
+The `http.tcp_no_delay` setting is deprecated in 7.1. It is replaced by
+`http.tcp.no_delay`.
+
+[float]
+[[breaking_71_network_changes]]
+=== Network changes
+
+[float]
+==== Deprecation of old network settings
+
+The `network.tcp.connect_timeout` setting is deprecated in 7.1. This setting
+was a fallback setting for `transport.connect_timeout`. To change the default
+connection timeout for client connections, modify `transport.connect_timeout`.
+
+[float]
+[[breaking_71_transport_changes]]
+=== Transport changes
+
+//tag::notable-breaking-changes[]
+[float]
+==== Deprecation of old transport settings
+
+The following settings are deprecated in 7.1. Each setting has a replacement
+setting that was introduced in 6.7.
+
+- `transport.tcp.port` is replaced by `transport.port`
+- `transport.tcp.compress` is replaced by `transport.compress`
+- `transport.tcp.connect_timeout` is replaced by `transport.connect_timeout`
+- `transport.tcp_no_delay` is replaced by `transport.tcp.no_delay`
+- `transport.profiles.profile_name.tcp_no_delay` is replaced by
+`transport.profiles.profile_name.tcp.no_delay`
+- `transport.profiles.profile_name.tcp_keep_alive` is replaced by
+`transport.profiles.profile_name.tcp.keep_alive`
+- `transport.profiles.profile_name.reuse_address` is replaced by
+`transport.profiles.profile_name.tcp.reuse_address`
+- `transport.profiles.profile_name.send_buffer_size` is replaced by `transport.profiles.profile_name.tcp.send_buffer_size`
+- `transport.profiles.profile_name.receive_buffer_size` is replaced by `transport.profiles.profile_name.tcp.receive_buffer_size`
+
+// end::notable-breaking-changes[]
+
+
+

From 9bf8bd40aed95fa0b6f913a6c45eecc454dc8cf2 Mon Sep 17 00:00:00 2001
From: Costin Leau <costin@users.noreply.github.com>
Date: Tue, 16 Apr 2019 19:49:00 +0300
Subject: [PATCH 044/112] SQL: Tweak pattern matching in SYS TABLES (#41243)

Yet another improvement to SYS TABLES, this time differentiating between table
types specified as '%' and as '' (empty string), while maintaining legacy support for null.

Fix #40775

(cherry picked from commit 6dbca5edd335eb1da8e7825389a15e5fe45397d4)
---
 .../xpack/sql/parser/CommandBuilder.java      |  2 +-
 .../logical/command/sys/SysTablesTests.java   | 50 +++++++++++++++----
 2 files changed, 41 insertions(+), 11 deletions(-)

diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java
index 04935023747..ba2a3906995 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java
@@ -146,7 +146,7 @@ abstract class CommandBuilder extends LogicalPlanBuilder {
         boolean legacyTableType = false;
         for (StringContext string : ctx.string()) {
             String value = string(string);
-            if (value != null) {
+            if (value != null && value.isEmpty() == false) {
                 // check special ODBC wildcard case
                 if (value.equals(StringUtils.SQL_WILDCARD) && ctx.string().size() == 1) {
                     // treat % as null
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java
index e2baeb2d8af..d7a24681329 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java
@@ -54,7 +54,30 @@ public class SysTablesTests extends ESTestCase {
     //
     // catalog enumeration
     //
-    public void testSysTablesCatalogEnumeration() throws Exception {
+    public void testSysTablesCatalogEnumerationWithEmptyType() throws Exception {
+        executeCommand("SYS TABLES CATALOG LIKE '%' LIKE '' TYPE ''", r -> {
+            assertEquals(1, r.size());
+            assertEquals(CLUSTER_NAME, r.column(0));
+            // everything else should be null
+            for (int i = 1; i < 10; i++) {
+                assertNull(r.column(i));
+            }
+        }, index);
+    }
+
+    public void testSysTablesCatalogAllTypes() throws Exception {
+        executeCommand("SYS TABLES CATALOG LIKE '%' LIKE '' TYPE '%'", r -> {
+            assertEquals(1, r.size());
+            assertEquals(CLUSTER_NAME, r.column(0));
+            // everything else should be null
+            for (int i = 1; i < 10; i++) {
+                assertNull(r.column(i));
+            }
+        }, new IndexInfo[0]);
+    }
+
+    // when types are null, consider them equivalent to '' for compatibility reasons
+    public void testSysTablesCatalogNoTypes() throws Exception {
         executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> {
             assertEquals(1, r.size());
             assertEquals(CLUSTER_NAME, r.column(0));
@@ -65,24 +88,18 @@ public class SysTablesTests extends ESTestCase {
         }, index);
     }
 
+
     //
     // table types enumeration
     //
+
+    // a missing type is treated like the '%' pattern (all table types)
     public void testSysTablesTypesEnumerationWoString() throws Exception {
         executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> {
             assertEquals(2, r.size());
             assertEquals("BASE TABLE", r.column(3));
             assertTrue(r.advanceRow());
             assertEquals("VIEW", r.column(3));
-        }, new IndexInfo[0]);
-    }
-
-    public void testSysTablesEnumerateTypes() throws Exception {
-        executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> {
-            assertEquals(2, r.size());
-            assertEquals("BASE TABLE", r.column(3));
-            assertTrue(r.advanceRow());
-            assertEquals("VIEW", r.column(3));
         }, alias, index);
     }
 
@@ -107,6 +124,13 @@ public class SysTablesTests extends ESTestCase {
         }, new IndexInfo[0]);
     }
 
+    // when a type is specified, apply filtering
+    public void testSysTablesTypesEnumerationAllCatalogsAndSpecifiedView() throws Exception {
+        executeCommand("SYS TABLES CATALOG LIKE '%' LIKE '' TYPE 'VIEW'", r -> {
+            assertEquals(0, r.size());
+        }, new IndexInfo[0]);
+    }
+
     public void testSysTablesDifferentCatalog() throws Exception {
         executeCommand("SYS TABLES CATALOG LIKE 'foo'", r -> {
             assertEquals(0, r.size());
@@ -262,6 +286,12 @@ public class SysTablesTests extends ESTestCase {
         }, alias);
     }
 
+    public void testSysTablesWithEmptyCatalogOnlyAliases() throws Exception {
+        executeCommand("SYS TABLES CATALOG LIKE '' LIKE 'test' TYPE 'VIEW'", r -> {
+            assertEquals(0, r.size());
+        }, alias);
+    }
+
     public void testSysTablesWithInvalidType() throws Exception {
         executeCommand("SYS TABLES LIKE 'test' TYPE 'QUE HORA ES'", r -> {
             assertEquals(0, r.size());

From 043c1f5d428f11f9303e43058d5a61685a638608 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi <jim.ferenczi@elastic.co>
Date: Tue, 16 Apr 2019 18:16:55 +0200
Subject: [PATCH 045/112] Unified highlighter should respect no_match_size with
 number_of_fragments set to 0 (#41069)

When number_of_fragments is set to 0 (full highlighting) and nothing matches, the unified
highlighter returns only the first sentence of the text. This is a legacy of the removed
postings highlighter, which was based on sentence breaks only. This commit changes this
behavior so that the provided no_match_size value is respected when number_of_fragments
is set to 0, making the behavior consistent for any value of the number_of_fragments option.
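
A short sketch of the resulting behavior (the "test" index and "text" field are
illustrative, and client() assumes the HighlighterSearchIT integration-test
context shown below):

    // Sketch, assuming the ESIntegTestCase context of HighlighterSearchIT.
    HighlightBuilder.Field field = new HighlightBuilder.Field("text")
        .highlighterType("unified")
        .numOfFragments(0)      // full highlighting
        .noMatchSize(10_000);   // larger than the stored text
    SearchResponse response = client().prepareSearch("test")
        .highlighter(new HighlightBuilder().field(field))
        .get();
    // with this change the single returned fragment is the whole field value,
    // not just its first sentence, even when the query does not match the field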

Closes #41066
---
 .../highlight/UnifiedHighlighter.java         | 37 -------------------
 .../highlight/HighlighterSearchIT.java        |  8 ++--
 2 files changed, 5 insertions(+), 40 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
index af37fa4edab..2d570d2b7c7 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
@@ -124,8 +124,6 @@ public class UnifiedHighlighter implements Highlighter {
                 "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
         }
 
-        snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
-
         if (field.fieldOptions().scoreOrdered()) {
             //let's sort the snippets by score if needed
             CollectionUtil.introSort(snippets, (o1, o2) -> Double.compare(o2.getScore(), o1.getScore()));
@@ -185,41 +183,6 @@ public class UnifiedHighlighter implements Highlighter {
         }
     }
 
-    protected static List<Snippet> filterSnippets(List<Snippet> snippets, int numberOfFragments) {
-
-        //We need to filter the snippets as due to no_match_size we could have
-        //either highlighted snippets or non highlighted ones and we don't want to mix those up
-        List<Snippet> filteredSnippets = new ArrayList<>(snippets.size());
-        for (Snippet snippet : snippets) {
-            if (snippet.isHighlighted()) {
-                filteredSnippets.add(snippet);
-            }
-        }
-
-        //if there's at least one highlighted snippet, we return all the highlighted ones
-        //otherwise we return the first non highlighted one if available
-        if (filteredSnippets.size() == 0) {
-            if (snippets.size() > 0) {
-                Snippet snippet = snippets.get(0);
-                //if we tried highlighting the whole content using whole break iterator (as number_of_fragments was 0)
-                //we need to return the first sentence of the content rather than the whole content
-                if (numberOfFragments == 0) {
-                    BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT);
-                    String text = snippet.getText();
-                    bi.setText(text);
-                    int next = bi.next();
-                    if (next != BreakIterator.DONE) {
-                        String newText = text.substring(0, next).trim();
-                        snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted());
-                    }
-                }
-                filteredSnippets.add(snippet);
-            }
-        }
-
-        return filteredSnippets;
-    }
-
     protected static String convertFieldValue(MappedFieldType type, Object value) {
         if (value instanceof BytesRef) {
             return type.valueForDisplay(value).toString();
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index e111abe0d51..d1a66969531 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -1715,9 +1715,11 @@ public class HighlighterSearchIT extends ESIntegTestCase {
         assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
 
         // We can also ask for a fragment longer than the input string and get the whole string
-        field.highlighterType("plain").noMatchSize(text.length() * 2);
-        response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get();
-        assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+        for (String type : new String[] { "plain", "unified" }) {
+            field.highlighterType(type).noMatchSize(text.length() * 2).numOfFragments(0);
+            response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get();
+            assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+        }
 
         field.highlighterType("fvh");
         response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get();

From 02247cc7df46da7e233410f613c30607d1dd7be3 Mon Sep 17 00:00:00 2001
From: Hendrik Muhs <hendrik.muhs@elastic.co>
Date: Tue, 16 Apr 2019 18:53:36 +0200
Subject: [PATCH 046/112] [ML-DataFrame] adapt page size on circuit breaker
 responses (#41149)

Handle circuit breaker responses by adapting the page size to reduce memory pressure; reduce the number of preview buckets to 100 and set the initial page size to 500.
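
A standalone sketch of the page size back-off applied when a search trips the
circuit breaker (the helper name is illustrative; the formula matches the one
added to DataFrameIndexer below, where MINIMUM_PAGE_SIZE is 10):

    // Sketch of the back-off; mirrors the reduction added to DataFrameIndexer.
    static int nextPageSize(int pageSize, long bytesWanted, long byteLimit) {
        // take the smaller of the breaker's limit/wanted ratio and a log10-based
        // factor, so the page size shrinks noticeably even though the breaker
        // trips only slightly above its limit
        double reducingFactor = Math.min((double) byteLimit / bytesWanted,
            1 - (Math.log10(pageSize) * 0.1));
        // below MINIMUM_PAGE_SIZE the indexer gives up and is marked as failed
        return (int) Math.round(reducingFactor * pageSize);
    }
    // Example: with the initial page size of 500 and a breaker reporting
    // bytesWanted = 110 against byteLimit = 100, the factor is
    // min(100/110, 1 - log10(500) * 0.1) = min(0.909, 0.730) = 0.730,
    // so the next page size is roughly 365.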
---
 .../core/dataframe/DataFrameMessages.java     |   5 +
 .../integration/DataFramePivotRestIT.java     |   3 +-
 ...nsportPreviewDataFrameTransformAction.java |   3 +-
 .../transforms/DataFrameIndexer.java          |  97 ++++++-
 .../transforms/DataFrameTransformTask.java    |  74 +++---
 .../dataframe/transforms/pivot/Pivot.java     |  23 +-
 .../transforms/DataFrameIndexerTests.java     | 239 ++++++++++++++++++
 7 files changed, 409 insertions(+), 35 deletions(-)
 create mode 100644 x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java
index dbe789ca3ae..86dce1b3314 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java
@@ -56,6 +56,11 @@ public class DataFrameMessages {
             "Failed to parse group_by for data frame pivot transform";
     public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_AGGREGATION =
             "Failed to parse aggregation for data frame pivot transform";
+    public static final String LOG_DATA_FRAME_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE =
+            "Search returned with out of memory error, reducing number of buckets per search from [{0}] to [{1}]";
+    public static final String LOG_DATA_FRAME_TRANSFORM_PIVOT_LOW_PAGE_SIZE_FAILURE =
+            "Search returned with out of memory error after repeated page size reductions to [{0}], unable to continue pivot, "
+            + "please simplify job or increase heap size on data nodes.";
 
     public static final String FAILED_TO_PARSE_TRANSFORM_CHECKPOINTS =
             "Failed to parse transform checkpoints for [{0}]";
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java
index 0d14851aa7c..6ff97e1ed9d 100644
--- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java
+++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java
@@ -260,7 +260,8 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase {
         createPreviewRequest.setJsonEntity(config);
         Map<String, Object> previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest));
         List<Map<String, Object>> preview = (List<Map<String, Object>>)previewDataframeResponse.get("preview");
-        assertThat(preview.size(), equalTo(393));
+        // preview is limited to 100
+        assertThat(preview.size(), equalTo(100));
         Set<String> expectedFields = new HashSet<>(Arrays.asList("reviewer", "by_day", "avg_rating"));
         preview.forEach(p -> {
             Set<String> keys = p.keySet();
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java
index 2a4ba47f507..b65830f72e7 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java
@@ -35,6 +35,7 @@ import static org.elasticsearch.xpack.dataframe.transforms.DataFrameIndexer.COMP
 public class TransportPreviewDataFrameTransformAction extends
     HandledTransportAction<PreviewDataFrameTransformAction.Request, PreviewDataFrameTransformAction.Response> {
 
+    private static final int NUMBER_OF_PREVIEW_BUCKETS = 100;
     private final XPackLicenseState licenseState;
     private final Client client;
     private final ThreadPool threadPool;
@@ -77,7 +78,7 @@ public class TransportPreviewDataFrameTransformAction extends
                     ClientHelper.DATA_FRAME_ORIGIN,
                     client,
                     SearchAction.INSTANCE,
-                    pivot.buildSearchRequest(null),
+                    pivot.buildSearchRequest(null, NUMBER_OF_PREVIEW_BUCKETS),
                     ActionListener.wrap(
                         r -> {
                             final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME);
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java
index c781d05f189..c670f32740c 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java
@@ -8,14 +8,21 @@ package org.elasticsearch.xpack.dataframe.transforms;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.breaker.CircuitBreakingException;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
+import org.elasticsearch.xpack.core.common.notifications.Auditor;
 import org.elasticsearch.xpack.core.dataframe.DataFrameField;
+import org.elasticsearch.xpack.core.dataframe.DataFrameMessages;
+import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage;
 import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats;
 import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig;
 import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer;
@@ -26,6 +33,7 @@ import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -35,22 +43,34 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 
 public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DataFrameIndexerTransformStats> {
 
+    public static final int MINIMUM_PAGE_SIZE = 10;
     public static final String COMPOSITE_AGGREGATION_NAME = "_data_frame";
     private static final Logger logger = LogManager.getLogger(DataFrameIndexer.class);
 
+    protected final Auditor<DataFrameAuditMessage> auditor;
+
     private Pivot pivot;
+    private int pageSize = 0;
 
     public DataFrameIndexer(Executor executor,
+                            Auditor<DataFrameAuditMessage> auditor,
                             AtomicReference<IndexerState> initialState,
                             Map<String, Object> initialPosition,
                             DataFrameIndexerTransformStats jobStats) {
         super(executor, initialState, initialPosition, jobStats);
+        this.auditor = Objects.requireNonNull(auditor);
     }
 
     protected abstract DataFrameTransformConfig getConfig();
 
     protected abstract Map<String, String> getFieldMappings();
 
+    protected abstract void failIndexer(String message);
+
+    public int getPageSize() {
+        return pageSize;
+    }
+
     /**
      * Request a checkpoint
      */
@@ -62,6 +82,11 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String,
             QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery();
             pivot = new Pivot(getConfig().getSource().getIndex(), queryBuilder, getConfig().getPivotConfig());
 
+            // initialize the page size if it has not been set; otherwise we might have reduced it after an out of memory error
+            if (pageSize == 0) {
+                pageSize = pivot.getInitialPageSize();
+            }
+
             // if run for the 1st time, create checkpoint
             if (getPosition() == null) {
                 createCheckpoint(listener);
@@ -73,6 +98,12 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String,
         }
     }
 
+    @Override
+    protected void onFinish(ActionListener<Void> listener) {
+        // reset the page size so we do not remember a low page size forever; it will be re-calculated on the next start
+        pageSize = 0;
+    }
+
     @Override
     protected IterationResult<Map<String, Object>> doProcess(SearchResponse searchResponse) {
         final CompositeAggregation agg = searchResponse.getAggregations().get(COMPOSITE_AGGREGATION_NAME);
@@ -121,6 +152,70 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String,
 
     @Override
     protected SearchRequest buildSearchRequest() {
-        return pivot.buildSearchRequest(getPosition());
+        return pivot.buildSearchRequest(getPosition(), pageSize);
+    }
+
+    /**
+     * Handle the circuit breaking case: a search consumed too much memory and got aborted.
+     *
+     * When we run out of memory we smoothly reduce the page size, which in turn reduces memory consumption.
+     *
+     * Implementation details: we take the values reported by the circuit breaker as a hint, but because the breaker
+     * trips early we also reduce by a page-size based factor (1 - log10(pageSize) * 0.1) and use the smaller result.
+     *
+     * @param e Exception thrown, only a {@link CircuitBreakingException} is handled
+     * @return true if exception was handled, false if not
+     */
+    protected boolean handleCircuitBreakingException(Exception e) {
+        CircuitBreakingException circuitBreakingException = getCircuitBreakingException(e);
+
+        if (circuitBreakingException == null) {
+            return false;
+        }
+
+        double reducingFactor = Math.min((double) circuitBreakingException.getByteLimit() / circuitBreakingException.getBytesWanted(),
+                1 - (Math.log10(pageSize) * 0.1));
+
+        int newPageSize = (int) Math.round(reducingFactor * pageSize);
+
+        if (newPageSize < MINIMUM_PAGE_SIZE) {
+            String message = DataFrameMessages.getMessage(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_PIVOT_LOW_PAGE_SIZE_FAILURE, pageSize);
+            failIndexer(message);
+            return true;
+        }
+
+        String message = DataFrameMessages.getMessage(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE, pageSize,
+                newPageSize);
+        auditor.info(getJobId(), message);
+        logger.info("Data frame transform [" + getJobId() + "]:" + message);
+
+        pageSize = newPageSize;
+        return true;
+    }
+
+    /**
+     * Inspect exception for circuit breaking exception and return the first one it can find.
+     *
+     * @param e Exception
+     * @return CircuitBreakingException instance if found, null otherwise
+     */
+    private static CircuitBreakingException getCircuitBreakingException(Exception e) {
+        // circuit breaking exceptions are at the bottom
+        Throwable unwrappedThrowable = ExceptionsHelper.unwrapCause(e);
+
+        if (unwrappedThrowable instanceof CircuitBreakingException) {
+            return (CircuitBreakingException) unwrappedThrowable;
+        } else if (unwrappedThrowable instanceof SearchPhaseExecutionException) {
+            SearchPhaseExecutionException searchPhaseException = (SearchPhaseExecutionException) unwrappedThrowable;
+            for (ShardSearchFailure shardFailure : searchPhaseException.shardFailures()) {
+                Throwable unwrappedShardFailure = ExceptionsHelper.unwrapCause(shardFailure.getCause());
+
+                if (unwrappedShardFailure instanceof CircuitBreakingException) {
+                    return (CircuitBreakingException) unwrappedShardFailure;
+                }
+            }
+        }
+
+        return null;
     }
 }
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
index 69ceb32dfc7..4088863a895 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
@@ -277,25 +277,6 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
         ));
     }
 
-    private boolean isIrrecoverableFailure(Exception e) {
-        return e instanceof IndexNotFoundException || e instanceof DataFrameConfigurationException;
-    }
-
-    synchronized void handleFailure(Exception e) {
-        if (isIrrecoverableFailure(e) || failureCount.incrementAndGet() > MAX_CONTINUOUS_FAILURES) {
-            String failureMessage = isIrrecoverableFailure(e) ?
-                "task encountered irrecoverable failure: " + e.getMessage() :
-                "task encountered more than " + MAX_CONTINUOUS_FAILURES + " failures; latest failure: " + e.getMessage();
-            auditor.error(transform.getId(), failureMessage);
-            stateReason.set(failureMessage);
-            taskState.set(DataFrameTransformTaskState.FAILED);
-            persistStateToClusterState(getState(), ActionListener.wrap(
-                r -> failureCount.set(0), // Successfully marked as failed, reset counter so that task can be restarted
-                exception -> {} // Noop, internal method logs the failure to update the state
-            ));
-        }
-    }
-
     /**
      * This is called when the persistent task signals that the allocated task should be terminated.
      * Termination in the task framework is essentially voluntary, as the allocated task can only be
@@ -313,13 +294,11 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
 
     protected class ClientDataFrameIndexer extends DataFrameIndexer {
         private static final int LOAD_TRANSFORM_TIMEOUT_IN_SECONDS = 30;
-        private static final int CREATE_CHECKPOINT_TIMEOUT_IN_SECONDS = 30;
 
         private final Client client;
         private final DataFrameTransformsConfigManager transformsConfigManager;
         private final DataFrameTransformsCheckpointService transformsCheckpointService;
         private final String transformId;
-        private final Auditor<DataFrameAuditMessage> auditor;
         private volatile DataFrameIndexerTransformStats previouslyPersistedStats = null;
         // Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index
         private volatile String lastAuditedExceptionMessage = null;
@@ -331,13 +310,12 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
                                       DataFrameTransformsCheckpointService transformsCheckpointService,
                                       AtomicReference<IndexerState> initialState, Map<String, Object> initialPosition, Client client,
                                       Auditor<DataFrameAuditMessage> auditor) {
-            super(threadPool.executor(ThreadPool.Names.GENERIC), initialState, initialPosition,
+            super(threadPool.executor(ThreadPool.Names.GENERIC), auditor, initialState, initialPosition,
                 new DataFrameIndexerTransformStats(transformId));
             this.transformId = transformId;
             this.transformsConfigManager = transformsConfigManager;
             this.transformsCheckpointService = transformsCheckpointService;
             this.client = client;
-            this.auditor = auditor;
         }
 
         @Override
@@ -474,19 +452,26 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
 
         @Override
         protected void onFailure(Exception exc) {
-            // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous
-            // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one
-            if (exc.getMessage().equals(lastAuditedExceptionMessage) == false) {
-                auditor.warning(transform.getId(), "Data frame transform encountered an exception: " + exc.getMessage());
-                lastAuditedExceptionMessage = exc.getMessage();
+            // the failure handler must not throw an exception due to internal problems
+            try {
+                logger.warn("Data frame transform [" + transform.getId() + "] encountered an exception: ", exc);
+
+                // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous
+                // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one
+                if (exc.getMessage().equals(lastAuditedExceptionMessage) == false) {
+                    auditor.warning(transform.getId(), "Data frame transform encountered an exception: " + exc.getMessage());
+                    lastAuditedExceptionMessage = exc.getMessage();
+                }
+                handleFailure(exc);
+            } catch (Exception e) {
+                logger.error("Data frame transform encountered an unexpected internal exception: " ,e);
             }
-            logger.warn("Data frame transform [" + transform.getId() + "] encountered an exception: ", exc);
-            handleFailure(exc);
         }
 
         @Override
         protected void onFinish(ActionListener<Void> listener) {
             try {
+                super.onFinish(listener);
                 long checkpoint = currentCheckpoint.incrementAndGet();
                 auditor.info(transform.getId(), "Finished indexing for data frame transform checkpoint [" + checkpoint + "]");
                 logger.info("Finished indexing for data frame transform [" + transform.getId() + "] checkpoint [" + checkpoint + "]");
@@ -515,6 +500,35 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
                 listener.onFailure(new RuntimeException("Failed to retrieve checkpoint", getCheckPointException));
             }));
         }
+
+        private boolean isIrrecoverableFailure(Exception e) {
+            return e instanceof IndexNotFoundException || e instanceof DataFrameConfigurationException;
+        }
+
+        synchronized void handleFailure(Exception e) {
+            if (handleCircuitBreakingException(e)) {
+                return;
+            }
+
+            if (isIrrecoverableFailure(e) || failureCount.incrementAndGet() > MAX_CONTINUOUS_FAILURES) {
+                String failureMessage = isIrrecoverableFailure(e) ?
+                    "task encountered irrecoverable failure: " + e.getMessage() :
+                    "task encountered more than " + MAX_CONTINUOUS_FAILURES + " failures; latest failure: " + e.getMessage();
+                failIndexer(failureMessage);
+            }
+        }
+
+        @Override
+        protected void failIndexer(String failureMessage) {
+            logger.error("Data frame transform [" + getJobId() + "]:" + failureMessage);
+            auditor.error(transform.getId(), failureMessage);
+            stateReason.set(failureMessage);
+            taskState.set(DataFrameTransformTaskState.FAILED);
+            persistStateToClusterState(DataFrameTransformTask.this.getState(), ActionListener.wrap(
+                r -> failureCount.set(0), // Successfully marked as failed, reset counter so that task can be restarted
+                exception -> {} // Noop, internal method logs the failure to update the state
+            ));
+        }
     }
 
     class DataFrameConfigurationException extends RuntimeException {
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java
index 0cf3edec162..aa63ea92e7a 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java
@@ -35,6 +35,8 @@ import java.util.stream.Stream;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 
 public class Pivot {
+    public static final int DEFAULT_INITIAL_PAGE_SIZE = 500;
+
     private static final String COMPOSITE_AGGREGATION_NAME = "_data_frame";
 
     private final PivotConfig config;
@@ -68,11 +70,29 @@ public class Pivot {
         SchemaUtil.deduceMappings(client, config, source, listener);
     }
 
-    public SearchRequest buildSearchRequest(Map<String, Object> position) {
+    /**
+     * Get the initial page size for this pivot.
+     *
+     * The page size is the main parameter for adjusting memory consumption. Memory consumption mainly depends on
+     * the page size, the type of aggregations and the data. As the page size is the number of buckets we return
+     * per page, it acts as a multiplier for the cost of aggregating each bucket.
+     *
+     * Initially this returns a default; in the future it might inspect the configuration and base the initial size
+     * on the aggregations used.
+     *
+     * @return the page size
+     */
+    public int getInitialPageSize() {
+        return DEFAULT_INITIAL_PAGE_SIZE;
+    }
+
+    public SearchRequest buildSearchRequest(Map<String, Object> position, int pageSize) {
         if (position != null) {
             cachedCompositeAggregation.aggregateAfter(position);
         }
 
+        cachedCompositeAggregation.size(pageSize);
+
         return cachedSearchRequest;
     }
 
@@ -127,7 +147,6 @@ public class Pivot {
             XContentParser parser = builder.generator().contentType().xContent().createParser(NamedXContentRegistry.EMPTY,
                     LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput());
             compositeAggregation = CompositeAggregationBuilder.parse(COMPOSITE_AGGREGATION_NAME, parser);
-            compositeAggregation.size(1000);
             config.getAggregationConfig().getAggregatorFactories().forEach(agg -> compositeAggregation.subAggregation(agg));
         } catch (IOException e) {
             throw new RuntimeException(DataFrameMessages.DATA_FRAME_TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION, e);
diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java
new file mode 100644
index 00000000000..b121e8091c1
--- /dev/null
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java
@@ -0,0 +1,239 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.dataframe.transforms;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.breaker.CircuitBreaker.Durability;
+import org.elasticsearch.common.breaker.CircuitBreakingException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.core.common.notifications.Auditor;
+import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage;
+import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats;
+import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig;
+import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests;
+import org.elasticsearch.xpack.core.indexing.IndexerState;
+import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot;
+import org.junit.Before;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DataFrameIndexerTests extends ESTestCase {
+
+    private Client client;
+    private static final String TEST_ORIGIN = "test_origin";
+    private static final String TEST_INDEX = "test_index";
+
+    class MockedDataFrameIndexer extends DataFrameIndexer {
+
+        private final DataFrameTransformConfig transformConfig;
+        private final Map<String, String> fieldMappings;
+        private final Function<SearchRequest, SearchResponse> searchFunction;
+        private final Function<BulkRequest, BulkResponse> bulkFunction;
+        private final Consumer<Exception> failureConsumer;
+
+        // used for synchronizing with the test
+        private CountDownLatch latch;
+
+        MockedDataFrameIndexer(
+                Executor executor,
+                DataFrameTransformConfig transformConfig,
+                Map<String, String> fieldMappings,
+                Auditor<DataFrameAuditMessage> auditor,
+                AtomicReference<IndexerState> initialState,
+                Map<String, Object> initialPosition,
+                DataFrameIndexerTransformStats jobStats,
+                Function<SearchRequest, SearchResponse> searchFunction,
+                Function<BulkRequest, BulkResponse> bulkFunction,
+                Consumer<Exception> failureConsumer) {
+            super(executor, auditor, initialState, initialPosition, jobStats);
+            this.transformConfig = Objects.requireNonNull(transformConfig);
+            this.fieldMappings = Objects.requireNonNull(fieldMappings);
+            this.searchFunction = searchFunction;
+            this.bulkFunction = bulkFunction;
+            this.failureConsumer = failureConsumer;
+        }
+
+        public CountDownLatch newLatch(int count) {
+            return latch = new CountDownLatch(count);
+        }
+
+        @Override
+        protected DataFrameTransformConfig getConfig() {
+            return transformConfig;
+        }
+
+        @Override
+        protected Map<String, String> getFieldMappings() {
+            return fieldMappings;
+        }
+
+        @Override
+        protected void createCheckpoint(ActionListener<Void> listener) {
+            listener.onResponse(null);
+        }
+
+        @Override
+        protected String getJobId() {
+            return transformConfig.getId();
+        }
+
+        @Override
+        protected void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase) {
+            assert latch != null;
+            try {
+                latch.await();
+            } catch (InterruptedException e) {
+                throw new IllegalStateException(e);
+            }
+
+            try {
+                SearchResponse response = searchFunction.apply(request);
+                nextPhase.onResponse(response);
+            } catch (Exception e) {
+                nextPhase.onFailure(e);
+            }
+        }
+
+        @Override
+        protected void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase) {
+            assert latch != null;
+            try {
+                latch.await();
+            } catch (InterruptedException e) {
+                throw new IllegalStateException(e);
+            }
+
+            try {
+                BulkResponse response = bulkFunction.apply(request);
+                nextPhase.onResponse(response);
+            } catch (Exception e) {
+                nextPhase.onFailure(e);
+            }
+        }
+
+        @Override
+        protected void doSaveState(IndexerState state, Map<String, Object> position, Runnable next) {
+            assert state == IndexerState.STARTED || state == IndexerState.INDEXING || state == IndexerState.STOPPED;
+            next.run();
+        }
+
+        @Override
+        protected void onFailure(Exception exc) {
+            try {
+                // mimic same behavior as {@link DataFrameTransformTask}
+                if (handleCircuitBreakingException(exc)) {
+                    return;
+                }
+
+                failureConsumer.accept(exc);
+            } catch (Exception e) {
+                fail("Internal error: " + e.getMessage());
+            }
+        }
+
+        @Override
+        protected void onFinish(ActionListener<Void> listener) {
+            super.onFinish(listener);
+            listener.onResponse(null);
+        }
+
+        @Override
+        protected void onAbort() {
+            fail("onAbort should not be called");
+        }
+
+        @Override
+        protected void failIndexer(String message) {
+            fail("failIndexer should not be called, received error: " + message);
+        }
+
+    }
+
+    @Before
+    public void setUpMocks() {
+        client = mock(Client.class);
+        ThreadPool threadPool = mock(ThreadPool.class);
+        when(client.threadPool()).thenReturn(threadPool);
+        when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
+    }
+
+    public void testPageSizeAdapt() throws InterruptedException {
+        DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfig();
+        AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);
+
+        Function<SearchRequest, SearchResponse> searchFunction = searchRequest -> {
+            throw new SearchPhaseExecutionException("query", "Partial shards failure", new ShardSearchFailure[] {
+                    new ShardSearchFailure(new CircuitBreakingException("too much memory", 110, 100, Durability.TRANSIENT)) });
+        };
+
+        Function<BulkRequest, BulkResponse> bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100);
+
+        Consumer<Exception> failureConsumer = e -> {
+            fail("expected circuit breaker exception to be handled");
+        };
+
+        final ExecutorService executor = Executors.newFixedThreadPool(1);
+        try {
+            Auditor<DataFrameAuditMessage> auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN,
+                    DataFrameAuditMessage.builder());
+
+            MockedDataFrameIndexer indexer = new MockedDataFrameIndexer(executor, config, Collections.emptyMap(), auditor, state, null,
+                    new DataFrameIndexerTransformStats(config.getId()), searchFunction, bulkFunction, failureConsumer);
+            final CountDownLatch latch = indexer.newLatch(1);
+            indexer.start();
+            assertThat(indexer.getState(), equalTo(IndexerState.STARTED));
+            assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()));
+            assertThat(indexer.getState(), equalTo(IndexerState.INDEXING));
+            latch.countDown();
+            awaitBusy(() -> indexer.getState() == IndexerState.STOPPED);
+            long pageSizeAfterFirstReduction = indexer.getPageSize();
+            assertTrue(Pivot.DEFAULT_INITIAL_PAGE_SIZE > pageSizeAfterFirstReduction);
+            assertTrue(pageSizeAfterFirstReduction > DataFrameIndexer.MINIMUM_PAGE_SIZE);
+
+            // run indexer a 2nd time
+            final CountDownLatch secondRunLatch = indexer.newLatch(1);
+            indexer.start();
+            assertEquals(pageSizeAfterFirstReduction, indexer.getPageSize());
+            assertThat(indexer.getState(), equalTo(IndexerState.STARTED));
+            assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()));
+            assertThat(indexer.getState(), equalTo(IndexerState.INDEXING));
+            secondRunLatch.countDown();
+            awaitBusy(() -> indexer.getState() == IndexerState.STOPPED);
+
+            // assert that page size has been reduced again
+            assertTrue(pageSizeAfterFirstReduction > indexer.getPageSize());
+            assertTrue(pageSizeAfterFirstReduction > DataFrameIndexer.MINIMUM_PAGE_SIZE);
+
+        } finally {
+            executor.shutdownNow();
+        }
+    }
+}

From a01dd96afec31ac9b8dd5a19e0c110ce08b7e772 Mon Sep 17 00:00:00 2001
From: Nik Everett <nik9000@gmail.com>
Date: Tue, 16 Apr 2019 13:47:39 -0400
Subject: [PATCH 047/112] Drop inline callouts from SQL conditional docs
 (#41205)

Drops "inline callouts" from the docs for SQL conditionals because they
aren't supported by Asciidoctor.

Relates to #41128
---
 .../sql/functions/conditional.asciidoc        | 31 ++++++++++++++-----
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc
index ce8d5c3e66c..135381a1c93 100644
--- a/docs/reference/sql/functions/conditional.asciidoc
+++ b/docs/reference/sql/functions/conditional.asciidoc
@@ -11,7 +11,10 @@ Functions that return one of their arguments by evaluating in an if-else manner.
 .Synopsis:
 [source, sql]
 ----
-COALESCE(expression<1>, expression<2>, ...)
+COALESCE(
+    expression, <1>
+    expression, <2>
+    ...)
 ----
 
 *Input*:
@@ -51,7 +54,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNull]
 .Synopsis:
 [source, sql]
 ----
-GREATEST(expression<1>, expression<2>, ...)
+GREATEST(
+    expression, <1>
+    expression, <2>
+    ...)
 ----
 
 *Input*:
@@ -92,7 +98,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull]
 .Synopsis:
 [source, sql]
 ----
-IFNULL(expression<1>, expression<2>)
+IFNULL(
+    expression, <1>
+    expression) <2>
 ----
 
 *Input*:
@@ -129,7 +137,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond]
 .Synopsis:
 [source, sql]
 ----
-ISNULL(expression<1>, expression<2>)
+ISNULL(
+    expression, <1>
+    expression) <2>
 ----
 
 *Input*:
@@ -166,7 +176,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond]
 .Synopsis:
 [source, sql]
 ----
-LEAST(expression<1>, expression<2>, ...)
+LEAST(
+    expression, <1>
+    expression, <2>
+    ...)
 ----
 
 *Input*:
@@ -208,7 +221,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull]
 .Synopsis:
 [source, sql]
 ----
-NULLIF(expression<1>, expression<2>)
+NULLIF(
+    expression, <1>
+    expression) <2>
 ----
 
 *Input*:
@@ -243,7 +258,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull]
 .Synopsis:
 [source, sql]
 ----
-NVL(expression<1>, expression<2>)
+NVL(
+    expression, <1>
+    expression) <2>
 ----
 
 *Input*:

From c4ffd758058084502e7a9b6e261dc6a45f64d357 Mon Sep 17 00:00:00 2001
From: Marios Trivyzas <matriv@gmail.com>
Date: Tue, 16 Apr 2019 20:39:54 +0300
Subject: [PATCH 048/112] SQL: Translate MIN/MAX on keyword fields as
 FIRST/LAST (#41247)

Although the translation rule was implemented in the `Optimizer`,
the rule was not added to the list of rules to be executed.

Relates to #41195
Follows #37936


(cherry picked from commit f426a339b77af6008d41cc000c9199fe384e9269)
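
As a rough standalone illustration (toy code, not the real `RuleExecutor`
API): a rewrite rule that is implemented but never registered in a batch
simply never runs, which is what the one-line change to `Optimizer.java`
below addresses.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.UnaryOperator;

    class RuleBatchSketch {
        // The executor applies only the rules that were registered in the batch.
        static String execute(String plan, List<UnaryOperator<String>> batch) {
            for (UnaryOperator<String> rule : batch) {
                plan = rule.apply(plan);
            }
            return plan;
        }

        public static void main(String[] args) {
            // Toy stand-in for ReplaceMinMaxWithTopHits: rewrite MIN/MAX on a
            // keyword field into their FIRST/LAST (top_hits-backed) equivalents.
            UnaryOperator<String> replaceMinMaxWithTopHits = p ->
                p.replace("MIN(keyword)", "FIRST(keyword)")
                 .replace("MAX(keyword)", "LAST(keyword)");

            List<UnaryOperator<String>> batch = new ArrayList<>();
            batch.add(replaceMinMaxWithTopHits); // without this line the rule never runs

            System.out.println(execute("SELECT MIN(keyword) FROM test", batch));
        }
    }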
---
 .../sql/qa/src/main/resources/agg.csv-spec    | 20 ++++++++++++++++
 .../xpack/sql/optimizer/Optimizer.java        |  1 +
 .../sql/planner/QueryTranslatorTests.java     | 24 +++++++++++++++++++
 3 files changed, 45 insertions(+)

diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec
index b55c3f66eaf..5cc70a8cb5e 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec
+++ b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec
@@ -414,6 +414,26 @@ SELECT COUNT(ALL last_name)=COUNT(ALL first_name) AS areEqual, COUNT(ALL first_n
 false          |90             |100
 ;
 
+topHitsAsMinAndMax
+schema::min:s|max:s|first:s|last:s
+SELECT MIN(first_name) as min, MAX(first_name) as max, FIRST(first_name) as first, LAST(first_name) as last FROM test_emp;
+
+    min        |   max         |   first      |   last
+---------------+---------------+--------------+----------
+   Alejandro   |   Zvonko      |   Alejandro  |   Zvonko
+;
+
+topHitsAsMinAndMaxAndGroupBy
+schema::gender:s|min:s|max:s|first:s|last:s
+SELECT gender, MIN(first_name) as min, MAX(first_name) as max, FIRST(first_name) as first, LAST(first_name) as last FROM test_emp GROUP BY gender ORDER BY gender;
+
+    gender     |   min         |   max        |   first       |   last
+---------------+---------------+--------------+---------------+----------
+null           |   Berni       |   Patricio   |   Berni       |   Patricio
+F              |   Alejandro   |   Xinglin    |   Alejandro   |   Xinglin
+M              |   Amabile     |   Zvonko     |   Amabile     |   Zvonko
+;
+
 topHitsWithOneArgAndGroupBy
 schema::gender:s|first:s|last:s
 SELECT gender, FIRST(first_name) as first, LAST(first_name) as last FROM test_emp GROUP BY gender ORDER BY gender;
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
index 6b1954f844c..d6e4c4fe07d 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
@@ -149,6 +149,7 @@ public class Optimizer extends RuleExecutor<LogicalPlan> {
 
         Batch aggregate = new Batch("Aggregation Rewrite",
                 //new ReplaceDuplicateAggsWithReferences(),
+                new ReplaceMinMaxWithTopHits(),
                 new ReplaceAggsWithMatrixStats(),
                 new ReplaceAggsWithExtendedStats(),
                 new ReplaceAggsWithStats(),
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java
index c76e0da987d..85bc20596e9 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java
@@ -763,6 +763,18 @@ public class QueryTranslatorTests extends ESTestCase {
                         "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," +
                         "\"sort\":[{\"keyword\":{\"order\":\"asc\",\"missing\":\"_last\",\"unmapped_type\":\"keyword\"}}]}}}}}"));
         }
+        {
+            PhysicalPlan p = optimizeAndPlan("SELECT MIN(keyword) FROM test");
+            assertEquals(EsQueryExec.class, p.getClass());
+            EsQueryExec eqe = (EsQueryExec) p;
+            assertEquals(1, eqe.output().size());
+            assertEquals("MIN(keyword)", eqe.output().get(0).qualifiedName());
+            assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType());
+            assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""),
+                endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," +
+                    "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," +
+                    "\"sort\":[{\"keyword\":{\"order\":\"asc\",\"missing\":\"_last\",\"unmapped_type\":\"keyword\"}}]}}}}}"));
+        }
         {
             PhysicalPlan p = optimizeAndPlan("SELECT LAST(date) FROM test");
             assertEquals(EsQueryExec.class, p.getClass());
@@ -775,6 +787,18 @@ public class QueryTranslatorTests extends ESTestCase {
                     "\"explain\":false,\"docvalue_fields\":[{\"field\":\"date\",\"format\":\"epoch_millis\"}]," +
                     "\"sort\":[{\"date\":{\"order\":\"desc\",\"missing\":\"_last\",\"unmapped_type\":\"date\"}}]}}}}}"));
         }
+        {
+            PhysicalPlan p = optimizeAndPlan("SELECT MAX(keyword) FROM test");
+            assertEquals(EsQueryExec.class, p.getClass());
+            EsQueryExec eqe = (EsQueryExec) p;
+            assertEquals(1, eqe.output().size());
+            assertEquals("MAX(keyword)", eqe.output().get(0).qualifiedName());
+            assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType());
+            assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""),
+                endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," +
+                    "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," +
+                    "\"sort\":[{\"keyword\":{\"order\":\"desc\",\"missing\":\"_last\",\"unmapped_type\":\"keyword\"}}]}}}}}"));
+        }
     }
 
     public void testTopHitsAggregationWithTwoArgs() {

From 7d5ff5a1fabb2d98a9e3c8c7e4953fe1dff16f6f Mon Sep 17 00:00:00 2001
From: Nik Everett <nik9000@gmail.com>
Date: Tue, 16 Apr 2019 15:27:51 -0400
Subject: [PATCH 049/112] Docs: Drop inline callouts from two SQL pages
 (#41270)

Drops inline callouts from the docs for SQL's string and type-conversion
functions because they are not compatible with Asciidoctor.
---
 docs/reference/sql/functions/string.asciidoc  | 64 +++++++++++++------
 .../sql/functions/type-conversion.asciidoc    |  8 ++-
 2 files changed, 50 insertions(+), 22 deletions(-)

diff --git a/docs/reference/sql/functions/string.asciidoc b/docs/reference/sql/functions/string.asciidoc
index 45389cd410e..7acc3587635 100644
--- a/docs/reference/sql/functions/string.asciidoc
+++ b/docs/reference/sql/functions/string.asciidoc
@@ -11,7 +11,7 @@ Functions for performing string manipulation.
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ASCII(string_exp<1>)
+ASCII(string_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -35,7 +35,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringAscii]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-BIT_LENGTH(string_exp<1>)
+BIT_LENGTH(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -58,7 +58,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringBitLength]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-CHAR(code<1>)
+CHAR(code) <1>
 --------------------------------------------------
 *Input*:
 
@@ -81,7 +81,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringChar]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-CHAR_LENGTH(string_exp<1>)
+CHAR_LENGTH(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -104,7 +104,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringCharLength]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-CONCAT(string_exp1<1>,string_exp2<2>)
+CONCAT(
+    string_exp1, <1>
+    string_exp2) <2>
 --------------------------------------------------
 *Input*:
 
@@ -128,7 +130,11 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringConcat]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-INSERT(source<1>, start<2>, length<3>, replacement<4>)
+INSERT(
+    source,      <1>
+    start,       <2>
+    length,      <3>
+    replacement) <4>
 --------------------------------------------------
 *Input*:
 
@@ -154,7 +160,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringInsert]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LCASE(string_exp<1>)
+LCASE(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -177,7 +183,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLCase]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LEFT(string_exp<1>, count<2>)
+LEFT(
+    string_exp, <1>
+    count)      <2>
 --------------------------------------------------
 *Input*:
 
@@ -201,7 +209,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLeft]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LENGTH(string_exp<1>)
+LENGTH(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -224,7 +232,11 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLength]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LOCATE(pattern<1>, source<2>[, start]<3>)
+LOCATE(
+    pattern, <1>
+    source   <2>
+    [, start]<3>
+)
 --------------------------------------------------
 *Input*:
 
@@ -254,7 +266,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWithStart]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LTRIM(string_exp<1>)
+LTRIM(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -277,7 +289,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLTrim]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-OCTET_LENGTH(string_exp<1>)
+OCTET_LENGTH(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -300,7 +312,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringOctetLength]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-POSITION(string_exp1<1>, string_exp2<2>)
+POSITION(
+    string_exp1, <1>
+    string_exp2) <2>
 --------------------------------------------------
 *Input*:
 
@@ -324,7 +338,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringPosition]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-REPEAT(string_exp<1>, count<2>)
+REPEAT(
+    string_exp, <1>
+    count)      <2>
 --------------------------------------------------
 *Input*:
 
@@ -348,7 +364,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringRepeat]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-REPLACE(source<1>, pattern<2>, replacement<3>)
+REPLACE(
+    source,      <1>
+    pattern,     <2>
+    replacement) <3>
 --------------------------------------------------
 *Input*:
 
@@ -373,7 +392,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringReplace]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-RIGHT(string_exp<1>, count<2>)
+RIGHT(
+    string_exp, <1>
+    count)      <2>
 --------------------------------------------------
 *Input*:
 
@@ -397,7 +418,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringRight]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-RTRIM(string_exp<1>)
+RTRIM(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
@@ -420,7 +441,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringRTrim]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SPACE(count<1>)
+SPACE(count) <1>
 --------------------------------------------------
 *Input*:
 
@@ -443,7 +464,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringSpace]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SUBSTRING(source<1>, start<2>, length<3>)
+SUBSTRING(
+    source, <1>
+    start,  <2>
+    length) <3>
 --------------------------------------------------
 *Input*:
 
@@ -468,7 +492,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringSubString]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-UCASE(string_exp<1>)
+UCASE(string_exp) <1>
 --------------------------------------------------
 *Input*:
 
diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc
index 2187d5a2e92..7f8488be40f 100644
--- a/docs/reference/sql/functions/type-conversion.asciidoc
+++ b/docs/reference/sql/functions/type-conversion.asciidoc
@@ -11,7 +11,9 @@ Functions for converting an expression of one data type to another.
 .Synopsis:
 [source, sql]
 ----
-CAST(expression<1> AS data_type<2>)
+CAST(
+    expression <1>
+ AS data_type) <2>
 ----
 
 <1> Expression to cast
@@ -50,7 +52,9 @@ To obtain an {es} `float`, perform casting to its SQL equivalent, `real` type.
 .Synopsis:
 [source, sql]
 ----
-CONVERT(expression<1>, data_type<2>)
+CONVERT(
+    expression, <1>
+    data_type)  <2>
 ----
 
 <1> Expression to convert

From 85912b89febbcd57052da02a001c29d93c218657 Mon Sep 17 00:00:00 2001
From: Costin Leau <costin@users.noreply.github.com>
Date: Tue, 16 Apr 2019 22:40:29 +0300
Subject: [PATCH 050/112] SQL: Fix LIMIT bug in agg sorting (#41258)

When a limit is specified on top of an agg sorting, the limit is pushed
down to the grouping, which affects the custom sorting. This commit fixes
that by restricting the limit to the sorting only.

Fix #40984

(cherry picked from commit da3726528d9011b05c0677ece6d11558994eccd9)
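
A minimal standalone sketch of the guard this change adds (the types are
hypothetical stand-ins, not the real `CompositeAggregationBuilder`): the
limit is pushed into the composite aggregation only when there is no
custom sorting, as shown in the `SourceGenerator` hunk below.

    import java.util.List;

    class LimitPushdownSketch {

        // Hypothetical stand-in for the composite aggregation builder.
        static class CompositeAgg {
            int size;
            void size(int size) { this.size = size; }
        }

        // Push the LIMIT down to the composite aggregation only when there is
        // no custom (aggregation-based) sorting; with custom sorting the limit
        // has to be applied locally, after the buckets have been sorted.
        static void applyLimit(int limit, List<String> sortingColumns, CompositeAgg agg) {
            if (sortingColumns.isEmpty()) {
                agg.size(limit); // safe: buckets already arrive in their final order
            }
            // non-empty sortingColumns: leave the agg unbounded, limit after sorting
        }
    }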
---
 .../src/main/resources/agg-ordering.sql-spec  | 24 +++++++++++++++++++
 .../sql/execution/search/SourceGenerator.java |  3 ++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec
index ed206da6d13..79d58c48e44 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec
+++ b/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec
@@ -29,9 +29,24 @@ SELECT MAX(salary) AS max, MIN(salary) AS min FROM test_emp HAVING MIN(salary) >
 aggWithoutAlias
 SELECT MAX(salary) AS max FROM test_emp GROUP BY gender ORDER BY MAX(salary);
 
+aggWithoutAliasWithLimit
+SELECT MAX(salary) AS max FROM test_emp GROUP BY gender ORDER BY MAX(salary) LIMIT 3;
+
+aggWithoutAliasWithLimitDesc
+SELECT MAX(salary) AS max FROM test_emp GROUP BY gender ORDER BY MAX(salary) DESC LIMIT 3;
+
 aggWithAlias
 SELECT MAX(salary) AS m FROM test_emp GROUP BY gender ORDER BY m;
 
+aggOrderByCountWithLimit
+SELECT MAX(salary) AS max, COUNT(*) AS c FROM test_emp GROUP BY gender ORDER BY c LIMIT 3;
+
+aggOrderByCountWithLimitDescAndGrouping
+SELECT gender, COUNT(*) AS c FROM test_emp GROUP BY gender ORDER BY c DESC LIMIT 5;
+
+aggOrderByCountWithLimitDesc
+SELECT MAX(salary) AS max, COUNT(*) AS c FROM test_emp GROUP BY gender ORDER BY c DESC LIMIT 3;
+
 multipleAggsThatGetRewrittenWithoutAlias
 SELECT MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY gender ORDER BY MAX(salary);
 
@@ -56,12 +71,21 @@ SELECT MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c
 aggNotSpecifiedInTheAggregateAndGroupWithHaving
 SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY MAX(salary), gender;
 
+aggNotSpecifiedInTheAggregateAndGroupWithHavingWithLimit
+SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY MAX(salary), c LIMIT 5;
+
+aggNotSpecifiedInTheAggregateAndGroupWithHavingWithLimitAndDirection
+SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY MAX(salary) ASC, c DESC LIMIT 5;
+
 groupAndAggNotSpecifiedInTheAggregateWithHaving
 SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY gender, MAX(salary);
 
 multipleAggsThatGetRewrittenWithAliasOnAMediumGroupBy
 SELECT languages, MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY languages ORDER BY max;
 
+multipleAggsThatGetRewrittenWithAliasOnAMediumGroupByWithLimit
+SELECT languages, MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY languages ORDER BY max DESC LIMIT 5;
+
 multipleAggsThatGetRewrittenWithAliasOnALargeGroupBy
 SELECT emp_no, MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY emp_no ORDER BY max;
 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
index a8876e441f7..8d9e59617aa 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
@@ -80,7 +80,8 @@ public abstract class SourceGenerator {
             if (source.size() == -1) {
                 source.size(sz);
             }
-            if (aggBuilder instanceof CompositeAggregationBuilder) {
+            // limit the composite aggs only for non-local sorting
+            if (aggBuilder instanceof CompositeAggregationBuilder && container.sortingColumns().isEmpty()) {
                 ((CompositeAggregationBuilder) aggBuilder).size(sz);
             }
         }

From 6a552c05fef4bc5aa963d362e2d44483e62b3b6b Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Wed, 17 Apr 2019 14:15:05 +1000
Subject: [PATCH 051/112] Use alias name from rollover request to query indices
 stats (#40774) (#41284)

In `TransportRolloverAction`, before performing a rollover we resolve the
source index name (the write index) from the alias in the rollover request.
Before evaluating the conditions and executing the rollover action we
retrieve index stats, but we did so using the resolved source index name
instead of the alias from the request. This fails when the user is assigned
a role with index privileges on the alias rather than on the concrete
index. This commit fixes the problem by using the alias from the request.
After this change we verified that, although the stats retrieved via the
alias cover both the write and the read indices, only the source (write)
index is considered when evaluating the conditions.

Closes #40771
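
A minimal sketch of the condition-evaluation change (the stats types are
hypothetical stand-ins): stats are now requested through the alias, and
only the write index's primaries are fed into the rollover conditions.

    import java.util.Map;

    class RolloverStatsSketch {

        // Hypothetical stand-ins for the docs/index stats types.
        static class DocsStats {
            final long count;
            DocsStats(long count) { this.count = count; }
        }
        static class IndexStats {
            final DocsStats primaries;
            IndexStats(DocsStats primaries) { this.primaries = primaries; }
        }

        // Before the fix the response-wide primaries were used, which sums docs
        // across every index behind the alias; after the fix only the write
        // index's primaries are considered when evaluating the conditions.
        static long writeIndexPrimaryDocs(Map<String, IndexStats> statsByIndex, String writeIndexName) {
            return statsByIndex.get(writeIndexName).primaries.count;
        }
    }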
---
 .../rollover/TransportRolloverAction.java     |   4 +-
 .../TransportRolloverActionTests.java         | 148 +++++++++++++++++-
 .../authz/31_rollover_using_alias.yml         | 139 ++++++++++++++++
 3 files changed, 282 insertions(+), 9 deletions(-)
 create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
index ce1f1dc2404..edd59f8b18c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
@@ -120,7 +120,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
         final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName);
         MetaDataCreateIndexService.validateIndexName(rolloverIndexName, state); // will fail if the index already exists
         checkNoDuplicatedAliasInIndexTemplate(metaData, rolloverIndexName, rolloverRequest.getAlias());
-        client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute(
+        client.admin().indices().prepareStats(rolloverRequest.getAlias()).clear().setDocs(true).execute(
             new ActionListener<IndicesStatsResponse>() {
                 @Override
                 public void onResponse(IndicesStatsResponse statsResponse) {
@@ -249,7 +249,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
 
     static Map<String, Boolean> evaluateConditions(final Collection<Condition<?>> conditions, final IndexMetaData metaData,
                                                     final IndicesStatsResponse statsResponse) {
-        return evaluateConditions(conditions, statsResponse.getPrimaries().getDocs(), metaData);
+        return evaluateConditions(conditions, statsResponse.getIndex(metaData.getIndex().getName()).getPrimaries().getDocs(), metaData);
     }
 
     static void validate(MetaData metaData, RolloverRequest request) {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
index ec3c82ba70b..058dcc72430 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
@@ -20,17 +20,30 @@
 package org.elasticsearch.action.admin.indices.rollover;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.AliasAction;
 import org.elasticsearch.cluster.metadata.AliasMetaData;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -39,9 +52,12 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
 import org.mockito.ArgumentCaptor;
 
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -51,7 +67,9 @@ import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverA
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
 import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -64,7 +82,9 @@ public class TransportRolloverActionTests extends ESTestCase {
         long docsInShards = 200;
 
         final Condition<?> condition = createTestCondition();
-        evaluateConditions(Sets.newHashSet(condition), createMetaData(), createIndicesStatResponse(docsInShards, docsInPrimaryShards));
+        String indexName = randomAlphaOfLengthBetween(5, 7);
+        evaluateConditions(Sets.newHashSet(condition), createMetaData(indexName),
+                createIndicesStatResponse(indexName, docsInShards, docsInPrimaryShards));
         final ArgumentCaptor<Condition.Stats> argument = ArgumentCaptor.forClass(Condition.Stats.class);
         verify(condition).evaluate(argument.capture());
 
@@ -286,7 +306,7 @@ public class TransportRolloverActionTests extends ESTestCase {
             .patterns(Arrays.asList("foo-*", "bar-*"))
             .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write").writeIndex(randomBoolean()))
             .build();
-        final MetaData metaData = MetaData.builder().put(createMetaData(), false).put(template).build();
+        final MetaData metaData = MetaData.builder().put(createMetaData(randomAlphaOfLengthBetween(5, 7)), false).put(template).build();
         String indexName = randomFrom("foo-123", "bar-xyz");
         String aliasName = randomFrom("foo-write", "bar-write");
         final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
@@ -294,9 +314,92 @@ public class TransportRolloverActionTests extends ESTestCase {
         assertThat(ex.getMessage(), containsString("index template [test-template]"));
     }
 
-    private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long primaryDocs) {
+    public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPrimariesFromWriteIndex() {
+        final TransportService mockTransportService = mock(TransportService.class);
+        final ClusterService mockClusterService = mock(ClusterService.class);
+        final ThreadPool mockThreadPool = mock(ThreadPool.class);
+        final MetaDataCreateIndexService mockCreateIndexService = mock(MetaDataCreateIndexService.class);
+        final IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class);
+        when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).thenReturn("logs-index-000003");
+        final ActionFilters mockActionFilters = mock(ActionFilters.class);
+        final MetaDataIndexAliasesService mdIndexAliasesService = mock(MetaDataIndexAliasesService.class);
+
+        final Client mockClient = mock(Client.class);
+        final AdminClient mockAdminClient = mock(AdminClient.class);
+        final IndicesAdminClient mockIndicesAdminClient = mock(IndicesAdminClient.class);
+        when(mockClient.admin()).thenReturn(mockAdminClient);
+        when(mockAdminClient.indices()).thenReturn(mockIndicesAdminClient);
+
+        final IndicesStatsRequestBuilder mockIndicesStatsBuilder = mock(IndicesStatsRequestBuilder.class);
+        when(mockIndicesAdminClient.prepareStats(any())).thenReturn(mockIndicesStatsBuilder);
+        final Map<String, IndexStats> indexStats = new HashMap<>();
+        int total = randomIntBetween(500, 1000);
+        indexStats.put("logs-index-000001", createIndexStats(200L, total));
+        indexStats.put("logs-index-000002", createIndexStats(300L, total));
+        final IndicesStatsResponse statsResponse = createAliasToMultipleIndicesStatsResponse(indexStats);
+        when(mockIndicesStatsBuilder.clear()).thenReturn(mockIndicesStatsBuilder);
+        when(mockIndicesStatsBuilder.setDocs(true)).thenReturn(mockIndicesStatsBuilder);
+
+        assert statsResponse.getPrimaries().getDocs().getCount() == 500L;
+        assert statsResponse.getTotal().getDocs().getCount() == (total + total);
+
+        doAnswer(invocation -> {
+            Object[] args = invocation.getArguments();
+            assert args.length == 1;
+            ActionListener<IndicesStatsResponse> listener = (ActionListener<IndicesStatsResponse>) args[0];
+            listener.onResponse(statsResponse);
+            return null;
+        }).when(mockIndicesStatsBuilder).execute(any(ActionListener.class));
+
+        final IndexMetaData.Builder indexMetaData = IndexMetaData.builder("logs-index-000001")
+                .putAlias(AliasMetaData.builder("logs-alias").writeIndex(false).build()).settings(settings(Version.CURRENT))
+                .numberOfShards(1).numberOfReplicas(1);
+        final IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("logs-index-000002")
+                .putAlias(AliasMetaData.builder("logs-alias").writeIndex(true).build()).settings(settings(Version.CURRENT))
+                .numberOfShards(1).numberOfReplicas(1);
+        final ClusterState stateBefore = ClusterState.builder(ClusterName.DEFAULT)
+                .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build();
+
+        final TransportRolloverAction transportRolloverAction = new TransportRolloverAction(mockTransportService, mockClusterService,
+                mockThreadPool, mockCreateIndexService, mockActionFilters, mockIndexNameExpressionResolver, mdIndexAliasesService,
+                mockClient);
+
+        // For given alias, verify that condition evaluation fails when the condition doc count is greater than the primaries doc count
+        // (primaries from only write index is considered)
+        PlainActionFuture<RolloverResponse> future = new PlainActionFuture<>();
+        RolloverRequest rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003");
+        rolloverRequest.addMaxIndexDocsCondition(500L);
+        rolloverRequest.dryRun(true);
+        transportRolloverAction.masterOperation(rolloverRequest, stateBefore, future);
+
+        RolloverResponse response = future.actionGet();
+        assertThat(response.getOldIndex(), equalTo("logs-index-000002"));
+        assertThat(response.getNewIndex(), equalTo("logs-index-000003"));
+        assertThat(response.isDryRun(), equalTo(true));
+        assertThat(response.isRolledOver(), equalTo(false));
+        assertThat(response.getConditionStatus().size(), equalTo(1));
+        assertThat(response.getConditionStatus().get("[max_docs: 500]"), is(false));
+
+        // For given alias, verify that the condition evaluation is successful when condition doc count is less than the primaries doc count
+        // (primaries from only write index is considered)
+        future = new PlainActionFuture<>();
+        rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003");
+        rolloverRequest.addMaxIndexDocsCondition(300L);
+        rolloverRequest.dryRun(true);
+        transportRolloverAction.masterOperation(rolloverRequest, stateBefore, future);
+
+        response = future.actionGet();
+        assertThat(response.getOldIndex(), equalTo("logs-index-000002"));
+        assertThat(response.getNewIndex(), equalTo("logs-index-000003"));
+        assertThat(response.isDryRun(), equalTo(true));
+        assertThat(response.isRolledOver(), equalTo(false));
+        assertThat(response.getConditionStatus().size(), equalTo(1));
+        assertThat(response.getConditionStatus().get("[max_docs: 300]"), is(true));
+    }
+
+    private IndicesStatsResponse createIndicesStatResponse(String indexName, long totalDocs, long primariesDocs) {
         final CommonStats primaryStats = mock(CommonStats.class);
-        when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0, between(1, 10000)));
+        when(primaryStats.getDocs()).thenReturn(new DocsStats(primariesDocs, 0, between(1, 10000)));
 
         final CommonStats totalStats = mock(CommonStats.class);
         when(totalStats.getDocs()).thenReturn(new DocsStats(totalDocs, 0, between(1, 10000)));
@@ -304,18 +407,49 @@ public class TransportRolloverActionTests extends ESTestCase {
         final IndicesStatsResponse response = mock(IndicesStatsResponse.class);
         when(response.getPrimaries()).thenReturn(primaryStats);
         when(response.getTotal()).thenReturn(totalStats);
-
+        final IndexStats indexStats = mock(IndexStats.class);
+        when(response.getIndex(indexName)).thenReturn(indexStats);
+        when(indexStats.getPrimaries()).thenReturn(primaryStats);
+        when(indexStats.getTotal()).thenReturn(totalStats);
         return response;
     }
 
-    private static IndexMetaData createMetaData() {
+    private IndicesStatsResponse createAliasToMultipleIndicesStatsResponse(Map<String, IndexStats> indexStats) {
+        final IndicesStatsResponse response = mock(IndicesStatsResponse.class);
+        final CommonStats primariesStats = new CommonStats();
+        final CommonStats totalStats = new CommonStats();
+        for (String indexName : indexStats.keySet()) {
+            when(response.getIndex(indexName)).thenReturn(indexStats.get(indexName));
+            primariesStats.add(indexStats.get(indexName).getPrimaries());
+            totalStats.add(indexStats.get(indexName).getTotal());
+        }
+
+        when(response.getPrimaries()).thenReturn(primariesStats);
+        when(response.getTotal()).thenReturn(totalStats);
+        return response;
+    }
+
+    private IndexStats createIndexStats(long primaries, long total) {
+        final CommonStats primariesCommonStats = mock(CommonStats.class);
+        when(primariesCommonStats.getDocs()).thenReturn(new DocsStats(primaries, 0, between(1, 10000)));
+
+        final CommonStats totalCommonStats = mock(CommonStats.class);
+        when(totalCommonStats.getDocs()).thenReturn(new DocsStats(total, 0, between(1, 10000)));
+
+        IndexStats indexStats = mock(IndexStats.class);
+        when(indexStats.getPrimaries()).thenReturn(primariesCommonStats);
+        when(indexStats.getTotal()).thenReturn(totalCommonStats);
+        return indexStats;
+    }
+
+    private static IndexMetaData createMetaData(String indexName) {
         final Settings settings = Settings.builder()
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
             .build();
-        return IndexMetaData.builder(randomAlphaOfLength(10))
+        return IndexMetaData.builder(indexName)
             .creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(3).getMillis())
             .settings(settings)
             .build();
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml
new file mode 100644
index 00000000000..52b6259f7cc
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml
@@ -0,0 +1,139 @@
+---
+
+setup:
+  - skip:
+      features: headers
+
+  - do:
+      cluster.health:
+          wait_for_status: yellow
+
+  - do:
+      security.put_role:
+        name: "alias_write_manage_role"
+        body:  >
+            {
+              "indices": [
+                { "names": ["write_manage_alias"], "privileges": ["write", "manage"] }
+              ]
+            }
+
+  - do:
+      security.put_user:
+        username: "test_user"
+        body:  >
+          {
+            "password" : "x-pack-test-password",
+            "roles" : [ "alias_write_manage_role" ],
+            "full_name" : "user with privileges to write, manage via alias"
+          }
+
+  - do:
+      indices.create:
+        index: logs-000001
+        body:
+          settings:
+            index:
+              number_of_shards:   1
+              number_of_replicas: 0
+
+  - do:
+      indices.put_alias:
+        index: logs-000001
+        name: write_manage_alias
+
+---
+teardown:
+  - do:
+      security.delete_user:
+        username: "test_user"
+        ignore: 404
+
+  - do:
+      security.delete_role:
+        name: "alias_write_manage_role"
+        ignore: 404
+
+  - do:
+      indices.delete_alias:
+        index: "logs-000001"
+        name: [ "write_manage_alias" ]
+        ignore: 404
+
+  - do:
+      indices.delete:
+        index: [ "logs-000001" ]
+        ignore: 404
+
+---
+"Test rollover, index via write alias of index":
+
+  # index using alias
+  - do:
+      headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
+      create:
+        id: 1
+        index: write_manage_alias
+        body: >
+          {
+            "name" : "doc1"
+          }
+
+  - do:
+      headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
+      create:
+        id: 2
+        index: write_manage_alias
+        body: >
+          {
+            "name" : "doc2"
+          }
+
+  - do:
+      indices.refresh: {}
+
+  # rollover using alias
+  - do:
+      headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
+      indices.rollover:
+        alias: "write_manage_alias"
+        wait_for_active_shards: 1
+        body:
+          conditions:
+            max_docs: 1
+
+  - match: { old_index: logs-000001 }
+  - match: { new_index: logs-000002 }
+  - match: { rolled_over: true }
+  - match: { dry_run: false }
+  - match: { conditions: { "[max_docs: 1]": true } }
+
+  # ensure new index is created
+  - do:
+      indices.exists:
+        index: logs-000002
+
+  - is_true: ''
+
+  # index using alias
+  - do:
+      headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
+      create:
+        id: 3
+        index: write_manage_alias
+        body: >
+          {
+            "name" : "doc3"
+          }
+
+  - do:
+      indices.refresh: {}
+
+  # check alias points to the new index and the doc was indexed
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: write_manage_alias
+
+  - match: { hits.total: 1 }
+  - match: { hits.hits.0._index: "logs-000002"}

From 0d1178fca69c13cb17f09fc42393103b3cd8e742 Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Wed, 17 Apr 2019 14:25:33 +1000
Subject: [PATCH 052/112] put mapping authorization for alias with write-index
 and multiple read indices (#40834) (#41287)

When the same alias points to multiple indices, we can write only to the
index whose `is_write_index` value is `true`. The special handling for the
put mapping request (to resolve authorized indices) checked the number of
indices behind the alias of a concrete index: if the alias pointed to more
than one index, the request was marked as unauthorized.

The check has been modified to consider the write index flag: when the
alias points to multiple indices, it is considered for authorization only
if the requested concrete index is the alias's write index.

Closes #40831
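
A minimal sketch of the modified check (the alias type is a hypothetical
stand-in for `AliasOrIndex.Alias`): an alias that points to several
indices can now be used to authorize the put mapping request, but only
when the requested concrete index is that alias's write index.

    import java.util.List;

    class PutMappingAliasResolutionSketch {

        // Hypothetical stand-in for AliasOrIndex.Alias.
        static class Alias {
            List<String> indices;  // names of the indices behind the alias
            String writeIndexName; // null when no write index is flagged
        }

        // True if the alias can be used to authorize a put mapping request that
        // targets concreteIndexName (mirrors the filter added in the diff below).
        static boolean aliasAuthorizes(Alias alias, String concreteIndexName) {
            if (alias.indices.size() == 1) {
                return true; // unambiguous: the alias points to exactly one index
            }
            return alias.writeIndexName != null && alias.writeIndexName.equals(concreteIndexName);
        }
    }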
---
 .../xpack/security/PermissionsIT.java         | 123 +++++++++++++++++-
 .../authz/IndicesAndAliasesResolver.java      |  12 +-
 .../authz/IndicesAndAliasesResolverTests.java |  29 ++++-
 3 files changed, 158 insertions(+), 6 deletions(-)

diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java
index 78fc2700f86..5503e12cb8b 100644
--- a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java
+++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java
@@ -7,9 +7,12 @@ package org.elasticsearch.xpack.security;
 
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Node;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
@@ -19,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction;
@@ -26,6 +30,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction;
 import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
 import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
 import org.elasticsearch.xpack.core.indexlifecycle.Phase;
+import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -36,8 +41,10 @@ import static java.util.Collections.singletonMap;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
 
 public class PermissionsIT extends ESRestTestCase {
+    private static final String jsonDoc = "{ \"name\" : \"elasticsearch\", \"body\": \"foo bar\" }";
 
     private String deletePolicy = "deletePolicy";
     private Settings indexSettingsWithPolicy;
@@ -74,7 +81,7 @@ public class PermissionsIT extends ESRestTestCase {
             .put("number_of_shards", 1)
             .put("number_of_replicas", 0)
             .build();
-        createNewSingletonPolicy(deletePolicy,"delete", new DeleteAction());
+        createNewSingletonPolicy(client(), deletePolicy,"delete", new DeleteAction());
     }
 
     /**
@@ -126,7 +133,62 @@ public class PermissionsIT extends ESRestTestCase {
         assertOK(client().performRequest(request));
     }
 
-    private void createNewSingletonPolicy(String policy, String phaseName, LifecycleAction action) throws IOException {
+    /**
+     * Tests that a user who is limited to an index's alias is able to write to an
+     * index that was rolled over by an ILM policy.
+     */
+    public void testWhenUserLimitedByOnlyAliasOfIndexCanWriteToIndexWhichWasRolledoverByILMPolicy()
+            throws IOException, InterruptedException {
+        /*
+         * Setup:
+         * - ILM policy to rollover index when max docs condition is met
+         * - Index template to which the ILM policy applies, and creation of the initial index
+         * - Create role with just write and manage privileges on alias
+         * - Create user and assign newly created role.
+         */
+        createNewSingletonPolicy(adminClient(), "foo-policy", "hot", new RolloverAction(null, null, 2L));
+        createIndexTemplate("foo-template", "foo-logs-*", "foo_alias", "foo-policy");
+        createIndexAsAdmin("foo-logs-000001", "foo_alias", randomBoolean());
+        createRole("foo_alias_role", "foo_alias");
+        createUser("test_user", "x-pack-test-password", "foo_alias_role");
+
+        // test_user: index docs using alias in the newly created index
+        indexDocs("test_user", "x-pack-test-password", "foo_alias", 2);
+        refresh("foo_alias");
+
+        // wait until the ILM policy triggers the rollover action, then verify that the new index exists
+        assertThat(awaitBusy(() -> {
+            Request request = new Request("HEAD", "/" + "foo-logs-000002");
+            int status;
+            try {
+                status = adminClient().performRequest(request).getStatusLine().getStatusCode();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+            return status == 200;
+        }), is(true));
+
+        // test_user: index docs using alias, now should be able to write to the new index
+        indexDocs("test_user", "x-pack-test-password", "foo_alias", 1);
+        refresh("foo_alias");
+
+        // verify that the doc has been indexed into new write index
+        awaitBusy(() -> {
+            Request request = new Request("GET", "/foo-logs-000002/_search");
+            Response response;
+            try {
+                response = adminClient().performRequest(request);
+                try (InputStream content = response.getEntity().getContent()) {
+                    Map<String, Object> map = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
+                    return ((Integer) XContentMapValues.extractValue("hits.total.value", map)) == 1;
+                }
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    private void createNewSingletonPolicy(RestClient client, String policy, String phaseName, LifecycleAction action) throws IOException {
         Phase phase = new Phase(phaseName, TimeValue.ZERO, singletonMap(action.getWriteableName(), action));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase));
         XContentBuilder builder = jsonBuilder();
@@ -135,7 +197,7 @@ public class PermissionsIT extends ESRestTestCase {
             "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON);
         Request request = new Request("PUT", "_ilm/policy/" + policy);
         request.setEntity(entity);
-        client().performRequest(request);
+        assertOK(client.performRequest(request));
     }
 
     private void createIndexAsAdmin(String name, Settings settings, String mapping) throws IOException {
@@ -144,4 +206,59 @@ public class PermissionsIT extends ESRestTestCase {
             + ", \"mappings\" : {" + mapping + "} }");
         assertOK(adminClient().performRequest(request));
     }
+
+    private void createIndexAsAdmin(String name, String alias, boolean isWriteIndex) throws IOException {
+        Request request = new Request("PUT", "/" + name);
+        request.setJsonEntity("{ \"aliases\": { \""+alias+"\": {" + ((isWriteIndex) ? "\"is_write_index\" : true" : "")
+            + "} } }");
+        assertOK(adminClient().performRequest(request));
+    }
+
+    private void createIndexTemplate(String name, String pattern, String alias, String policy) throws IOException {
+        Request request = new Request("PUT", "/_template/" + name);
+        request.setJsonEntity("{\n" +
+                "                \"index_patterns\": [\""+pattern+"\"],\n" +
+                "                \"settings\": {\n" +
+                "                   \"number_of_shards\": 1,\n" +
+                "                   \"number_of_replicas\": 0,\n" +
+                "                   \"index.lifecycle.name\": \""+policy+"\",\n" +
+                "                   \"index.lifecycle.rollover_alias\": \""+alias+"\"\n" +
+                "                 }\n" +
+                "              }");
+        assertOK(adminClient().performRequest(request));
+    }
+
+    private void createUser(String name, String password, String role) throws IOException {
+        Request request = new Request("PUT", "/_security/user/" + name);
+        request.setJsonEntity("{ \"password\": \""+password+"\", \"roles\": [ \""+ role+"\"] }");
+        assertOK(adminClient().performRequest(request));
+    }
+
+    private void createRole(String name, String alias) throws IOException {
+        Request request = new Request("PUT", "/_security/role/" + name);
+        request.setJsonEntity("{ \"indices\": [ { \"names\" : [ \""+ alias+"\"], \"privileges\": [ \"write\", \"manage\" ] } ] }");
+        assertOK(adminClient().performRequest(request));
+    }
+
+    private void indexDocs(String user, String passwd, String index, int noOfDocs) throws IOException {
+        RestClientBuilder builder = RestClient.builder(adminClient().getNodes().toArray(new Node[0]));
+        String token = basicAuthHeaderValue(user, new SecureString(passwd.toCharArray()));
+        configureClient(builder, Settings.builder()
+                .put(ThreadContext.PREFIX + ".Authorization", token)
+                .build());
+        builder.setStrictDeprecationMode(true);
+        try (RestClient userClient = builder.build();) {
+
+            for (int cnt = 0; cnt < noOfDocs; cnt++) {
+                Request request = new Request("POST", "/" + index + "/_doc");
+                request.setJsonEntity(jsonDoc);
+                assertOK(userClient.performRequest(request));
+            }
+        }
+    }
+
+    private void refresh(String index) throws IOException {
+        Request request = new Request("POST", "/" + index + "/_refresh");
+        assertOK(adminClient().performRequest(request));
+    }
 }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
index e5d4609c13f..6e0c2ed0bb1 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
@@ -249,7 +249,17 @@ class IndicesAndAliasesResolver {
                 Optional<String> foundAlias = aliasMetaData.stream()
                     .map(AliasMetaData::alias)
                     .filter(authorizedIndicesList::contains)
-                    .filter(aliasName -> metaData.getAliasAndIndexLookup().get(aliasName).getIndices().size() == 1)
+                    .filter(aliasName -> {
+                        AliasOrIndex alias = metaData.getAliasAndIndexLookup().get(aliasName);
+                        List<IndexMetaData> indexMetadata = alias.getIndices();
+                        if (indexMetadata.size() == 1) {
+                            return true;
+                        } else {
+                            assert alias instanceof AliasOrIndex.Alias;
+                            IndexMetaData idxMeta = ((AliasOrIndex.Alias) alias).getWriteIndex();
+                            return idxMeta != null && idxMeta.getIndex().getName().equals(concreteIndexName);
+                        }
+                    })
                     .findFirst();
                 resolvedAliasOrIndex = foundAlias.orElse(concreteIndexName);
             } else {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java
index 2f09b74ac3d..dc32580980e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java
@@ -72,6 +72,7 @@ import org.joda.time.format.DateTimeFormat;
 import org.junit.Before;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -104,7 +105,6 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
     private IndicesAndAliasesResolver defaultIndicesResolver;
     private IndexNameExpressionResolver indexNameExpressionResolver;
     private Map<String, RoleDescriptor> roleMap;
-    private FieldPermissionsCache fieldPermissionsCache;
 
     @Before
     public void setup() {
@@ -138,13 +138,15 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
                 .put(indexBuilder("-index11").settings(settings))
                 .put(indexBuilder("-index20").settings(settings))
                 .put(indexBuilder("-index21").settings(settings))
+                .put(indexBuilder("logs-00001").putAlias(AliasMetaData.builder("logs-alias").writeIndex(false)).settings(settings))
+                .put(indexBuilder("logs-00002").putAlias(AliasMetaData.builder("logs-alias").writeIndex(false)).settings(settings))
+                .put(indexBuilder("logs-00003").putAlias(AliasMetaData.builder("logs-alias").writeIndex(true)).settings(settings))
                 .put(indexBuilder(securityIndexName).settings(settings)).build();
 
         if (withAlias) {
             metaData = SecurityTestUtils.addAliasToMetaData(metaData, securityIndexName);
         }
         this.metaData = metaData;
-        this.fieldPermissionsCache = new FieldPermissionsCache(settings);
 
         user = new User("user", "role");
         userDashIndices = new User("dash", "dash");
@@ -1355,6 +1357,29 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
         request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index(index, UUIDs.base64UUID()));
         putMappingIndexOrAlias = IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices, metaData);
         assertEquals(index, putMappingIndexOrAlias);
+
+    }
+
+    public void testWhenAliasToMultipleIndicesAndUserIsAuthorizedUsingAliasReturnsAliasNameForDynamicPutMappingRequestOnWriteIndex() {
+        String index = "logs-00003"; // write index
+        PutMappingRequest request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index(index, UUIDs.base64UUID()));
+        List<String> authorizedIndices = Collections.singletonList("logs-alias");
+        assert metaData.getAliasAndIndexLookup().get("logs-alias").getIndices().size() == 3;
+        String putMappingIndexOrAlias = IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices, metaData);
+        String message = "user is authorized to access `logs-alias` and the put mapping request is for a write index"
+                + "so this should have returned the alias name";
+        assertEquals(message, "logs-alias", putMappingIndexOrAlias);
+    }
+
+    public void testWhenAliasToMultipleIndicesAndUserIsAuthorizedUsingAliasReturnsIndexNameForDynamicPutMappingRequestOnReadIndex() {
+        String index = "logs-00002"; // read index
+        PutMappingRequest request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index(index, UUIDs.base64UUID()));
+        List<String> authorizedIndices = Collections.singletonList("logs-alias");
+        assert metaData.getAliasAndIndexLookup().get("logs-alias").getIndices().size() == 3;
+        String putMappingIndexOrAlias = IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices, metaData);
+        String message = "user is authorized to access `logs-alias` and the put mapping request is for a read index"
+                + "so this should have returned the concrete index as fallback";
+        assertEquals(message, index, putMappingIndexOrAlias);
     }
 
     // TODO with the removal of DeleteByQuery is there another way to test resolving a write action?

From 84e2f9d8ecb834b08e8188c81e8f0ac76fa93e61 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Wed, 17 Apr 2019 08:58:11 +0300
Subject: [PATCH 053/112] fix the packer cache script (#41183)

* fix the packer cache script

This PR disables the explicit pull, since it seems to always try to
work with a registry.
Functionality is not affected since we still build the images
on pull.
---
 distribution/docker/build.gradle | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 07e8926b2a3..71fc62673dc 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -138,3 +138,8 @@ for (final boolean oss : [false, true]) {
 
 assemble.dependsOn "buildOssDockerImage"
 assemble.dependsOn "buildDockerImage"
+
+// We build the images used in compose locally, but the pull command insists on using a repository,
+// thus we must disable it to prevent it from doing so.
+// Everything will still be available since we build the local images on a pull.
+composePull.enabled = false

From 3fd081528dbd29d573644949fee974869f624cca Mon Sep 17 00:00:00 2001
From: Marios Trivyzas <matriv@gmail.com>
Date: Wed, 17 Apr 2019 10:12:11 +0300
Subject: [PATCH 054/112] SQL: Allow current_date/time/timestamp to be also
 used as a function escape pattern (#41254)

CURRENT_DATE/CURRENT_TIME/CURRENT_TIMESTAMP can be used as SQL keywords
(without parentheses) and therefore there is a special rule in the
grammar to accommodate this.

Previously, this rule also caught the parenthesised versions of those functions,
preventing the {fn <functionName>()} escape syntax from being used, e.g.:
{fn current_time(2)} or {fn current_timestamp()}

Now the grammar rule catches only the keyword versions, and all parenthesised
versions go through the normal function resolution. As a consequence, the validation
of the precision is moved from the parser level (ExpressionBuilder) to the function
implementations.

Fixes: #41240
(cherry picked from commit bfbc9f140144b5a35aa29008b58bf58074419853)
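
For reference, the precision handling that moves out of ExpressionBuilder amounts to
truncating the nanosecond component to the requested number of fractional digits
(default 3). A minimal sketch of such a helper, modelled on the code removed below;
the getNanoPrecision name matches the new DateUtils call sites, while throwing
SqlIllegalArgumentException for an out-of-range precision is an assumption about how
the validation now surfaces:

    static int getNanoPrecision(Expression precisionExpression, int nano) {
        // default to millisecond precision when no argument is given
        int precision = precisionExpression != null ? Foldables.intValueOf(precisionExpression) : 3;
        if (precision < 0 || precision > 9) {
            throw new SqlIllegalArgumentException("precision needs to be between [0-9], received [" + precision + "]");
        }
        // drop the digits below the requested precision
        return nano - nano % (int) Math.pow(10, 9 - precision);
    }

CurrentDateTime and CurrentTime then simply pass the result to withNano(), as the
hunks below show.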
---
 x-pack/plugin/sql/src/main/antlr/SqlBase.g4   |   8 +-
 .../scalar/datetime/CurrentDateTime.java      |  12 +-
 .../function/scalar/datetime/CurrentTime.java |  13 +-
 .../xpack/sql/parser/ExpressionBuilder.java   |  20 +-
 .../xpack/sql/parser/SqlBaseParser.java       | 722 +++++++++---------
 .../xpack/sql/util/DateUtils.java             |  25 +
 .../scalar/datetime/CurrentDateTimeTests.java |  31 +-
 .../scalar/datetime/CurrentTimeTests.java     |  31 +-
 .../sql/parser/EscapedFunctionsTests.java     |  23 +
 .../xpack/sql/parser/ExpressionTests.java     |  14 +-
 10 files changed, 468 insertions(+), 431 deletions(-)

diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4
index fd47efbc5c6..d814e9e60f2 100644
--- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4
+++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4
@@ -226,9 +226,9 @@ primaryExpression
     ;
 
 builtinDateTimeFunction
-    : name=CURRENT_TIMESTAMP ('(' precision=INTEGER_VALUE? ')')?
-    | name=CURRENT_DATE ('(' ')')?
-    | name=CURRENT_TIME ('(' precision=INTEGER_VALUE? ')')?
+    : name=CURRENT_TIMESTAMP
+    | name=CURRENT_DATE
+    | name=CURRENT_TIME
     ;
 
 castExpression
@@ -340,7 +340,7 @@ string
 // http://developer.mimer.se/validator/sql-reserved-words.tml
 nonReserved
     : ANALYZE | ANALYZED 
-    | CATALOGS | COLUMNS
+    | CATALOGS | COLUMNS | CURRENT_DATE | CURRENT_TIME | CURRENT_TIMESTAMP
     | DAY | DEBUG  
     | EXECUTABLE | EXPLAIN 
     | FIRST | FORMAT | FULL | FUNCTIONS
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java
index 59f1251096f..16791421eb6 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java
@@ -7,7 +7,6 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Foldables;
 import org.elasticsearch.xpack.sql.session.Configuration;
 import org.elasticsearch.xpack.sql.tree.NodeInfo;
 import org.elasticsearch.xpack.sql.tree.Source;
@@ -15,6 +14,8 @@ import org.elasticsearch.xpack.sql.type.DataType;
 
 import java.time.ZonedDateTime;
 
+import static org.elasticsearch.xpack.sql.util.DateUtils.getNanoPrecision;
+
 public class CurrentDateTime extends CurrentFunction<ZonedDateTime> {
 
     private final Expression precision;
@@ -34,13 +35,6 @@ public class CurrentDateTime extends CurrentFunction<ZonedDateTime> {
     }
 
     static ZonedDateTime nanoPrecision(ZonedDateTime zdt, Expression precisionExpression) {
-        int precision = precisionExpression != null ? Foldables.intValueOf(precisionExpression) : 3;
-        int nano = zdt.getNano();
-        if (precision >= 0 && precision < 10) {
-            // remove the remainder
-            nano = nano - nano % (int) Math.pow(10, (9 - precision));
-            return zdt.withNano(nano);
-        }
-        return zdt;
+        return zdt.withNano(getNanoPrecision(precisionExpression, zdt.getNano()));
     }
 }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTime.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTime.java
index 5ab2dd9e8b4..dac8216e0c5 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTime.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTime.java
@@ -7,7 +7,6 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Foldables;
 import org.elasticsearch.xpack.sql.session.Configuration;
 import org.elasticsearch.xpack.sql.tree.NodeInfo;
 import org.elasticsearch.xpack.sql.tree.Source;
@@ -15,6 +14,8 @@ import org.elasticsearch.xpack.sql.type.DataType;
 
 import java.time.OffsetTime;
 
+import static org.elasticsearch.xpack.sql.util.DateUtils.getNanoPrecision;
+
 public class CurrentTime extends CurrentFunction<OffsetTime> {
 
     private final Expression precision;
@@ -35,13 +36,7 @@ public class CurrentTime extends CurrentFunction<OffsetTime> {
     }
 
     static OffsetTime nanoPrecision(OffsetTime ot, Expression precisionExpression) {
-        int precision = precisionExpression != null ? Foldables.intValueOf(precisionExpression) : 3;
-        int nano = ot.getNano();
-        if (precision >= 0 && precision < 10) {
-            // remove the remainder
-            nano = nano - nano % (int) Math.pow(10, (9 - precision));
-            return ot.withNano(nano);
-        }
-        return ot;
+        return ot.withNano(getNanoPrecision(precisionExpression, ot.getNano()));
     }
+
 }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java
index 78f68342b69..ad2539ab99b 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java
@@ -109,7 +109,6 @@ import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionDefaultCo
 import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue;
 import org.elasticsearch.xpack.sql.tree.Source;
 import org.elasticsearch.xpack.sql.type.DataType;
-import org.elasticsearch.xpack.sql.type.DataTypeConversion;
 import org.elasticsearch.xpack.sql.type.DataTypes;
 import org.elasticsearch.xpack.sql.util.StringUtils;
 
@@ -431,30 +430,15 @@ abstract class ExpressionBuilder extends IdentifierBuilder {
         // maps CURRENT_XXX to its respective function e.g: CURRENT_TIMESTAMP()
         // since the functions need access to the Configuration, the parser only registers the definition and not the actual function
         Source source = source(ctx);
-        Literal p = null;
-
-        if (ctx.precision != null) {
-            try {
-                Source pSource = source(ctx.precision);
-                short safeShort = DataTypeConversion.safeToShort(StringUtils.parseLong(ctx.precision.getText()));
-                if (safeShort > 9 || safeShort < 0) {
-                    throw new ParsingException(pSource, "Precision needs to be between [0-9], received [{}]", safeShort);
-                }
-                p = Literal.of(pSource, Short.valueOf(safeShort));
-            } catch (SqlIllegalArgumentException siae) {
-                throw new ParsingException(source, siae.getMessage());
-            }
-        }
-        
         String functionName = ctx.name.getText();
 
         switch (ctx.name.getType()) {
             case SqlBaseLexer.CURRENT_TIMESTAMP:
-                return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, p != null ? singletonList(p) : emptyList());
+                return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, emptyList());
             case SqlBaseLexer.CURRENT_DATE:
                 return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, emptyList());
             case SqlBaseLexer.CURRENT_TIME:
-                return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, p != null ? singletonList(p) : emptyList());
+                return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, emptyList());
             default:
                 throw new ParsingException(source, "Unknown function [{}]", functionName);
         }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
index 5bad02b1392..6769cc88695 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
@@ -1,13 +1,27 @@
 // ANTLR GENERATED CODE: DO NOT EDIT
 package org.elasticsearch.xpack.sql.parser;
-import org.antlr.v4.runtime.atn.*;
+
+import org.antlr.v4.runtime.FailedPredicateException;
+import org.antlr.v4.runtime.NoViableAltException;
+import org.antlr.v4.runtime.Parser;
+import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.RecognitionException;
+import org.antlr.v4.runtime.RuleContext;
+import org.antlr.v4.runtime.RuntimeMetaData;
+import org.antlr.v4.runtime.Token;
+import org.antlr.v4.runtime.TokenStream;
+import org.antlr.v4.runtime.Vocabulary;
+import org.antlr.v4.runtime.VocabularyImpl;
+import org.antlr.v4.runtime.atn.ATN;
+import org.antlr.v4.runtime.atn.ATNDeserializer;
+import org.antlr.v4.runtime.atn.ParserATNSimulator;
+import org.antlr.v4.runtime.atn.PredictionContextCache;
 import org.antlr.v4.runtime.dfa.DFA;
-import org.antlr.v4.runtime.*;
-import org.antlr.v4.runtime.misc.*;
-import org.antlr.v4.runtime.tree.*;
+import org.antlr.v4.runtime.tree.ParseTreeListener;
+import org.antlr.v4.runtime.tree.ParseTreeVisitor;
+import org.antlr.v4.runtime.tree.TerminalNode;
+
 import java.util.List;
-import java.util.Iterator;
-import java.util.ArrayList;
 
 @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
 class SqlBaseParser extends Parser {
@@ -768,6 +782,9 @@ class SqlBaseParser extends Parser {
         case ANALYZED:
         case CATALOGS:
         case COLUMNS:
+        case CURRENT_DATE:
+        case CURRENT_TIME:
+        case CURRENT_TIMESTAMP:
         case DAY:
         case DEBUG:
         case EXECUTABLE:
@@ -844,6 +861,9 @@ class SqlBaseParser extends Parser {
         case ANALYZED:
         case CATALOGS:
         case COLUMNS:
+        case CURRENT_DATE:
+        case CURRENT_TIME:
+        case CURRENT_TIMESTAMP:
         case DAY:
         case DEBUG:
         case EXECUTABLE:
@@ -914,6 +934,9 @@ class SqlBaseParser extends Parser {
         case ANALYZED:
         case CATALOGS:
         case COLUMNS:
+        case CURRENT_DATE:
+        case CURRENT_TIME:
+        case CURRENT_TIMESTAMP:
         case DAY:
         case DEBUG:
         case EXECUTABLE:
@@ -1087,6 +1110,9 @@ class SqlBaseParser extends Parser {
         case ANALYZED:
         case CATALOGS:
         case COLUMNS:
+        case CURRENT_DATE:
+        case CURRENT_TIME:
+        case CURRENT_TIMESTAMP:
         case DAY:
         case DEBUG:
         case EXECUTABLE:
@@ -4241,7 +4267,7 @@ class SqlBaseParser extends Parser {
         _prevctx = _localctx;
         setState(577);
         _la = _input.LA(1);
-        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) {
+        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) {
           {
           setState(574);
           qualifiedName();
@@ -4339,9 +4365,7 @@ class SqlBaseParser extends Parser {
 
   public static class BuiltinDateTimeFunctionContext extends ParserRuleContext {
     public Token name;
-    public Token precision;
     public TerminalNode CURRENT_TIMESTAMP() { return getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0); }
-    public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); }
     public TerminalNode CURRENT_DATE() { return getToken(SqlBaseParser.CURRENT_DATE, 0); }
     public TerminalNode CURRENT_TIME() { return getToken(SqlBaseParser.CURRENT_TIME, 0); }
     public BuiltinDateTimeFunctionContext(ParserRuleContext parent, int invokingState) {
@@ -4366,83 +4390,28 @@ class SqlBaseParser extends Parser {
   public final BuiltinDateTimeFunctionContext builtinDateTimeFunction() throws RecognitionException {
     BuiltinDateTimeFunctionContext _localctx = new BuiltinDateTimeFunctionContext(_ctx, getState());
     enterRule(_localctx, 62, RULE_builtinDateTimeFunction);
-    int _la;
     try {
-      setState(621);
+      setState(603);
       switch (_input.LA(1)) {
       case CURRENT_TIMESTAMP:
         enterOuterAlt(_localctx, 1);
         {
         setState(600);
         ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIMESTAMP);
-        setState(606);
-        _errHandler.sync(this);
-        switch ( getInterpreter().adaptivePredict(_input,82,_ctx) ) {
-        case 1:
-          {
-          setState(601);
-          match(T__0);
-          setState(603);
-          _la = _input.LA(1);
-          if (_la==INTEGER_VALUE) {
-            {
-            setState(602);
-            ((BuiltinDateTimeFunctionContext)_localctx).precision = match(INTEGER_VALUE);
-            }
-          }
-
-          setState(605);
-          match(T__1);
-          }
-          break;
-        }
         }
         break;
       case CURRENT_DATE:
         enterOuterAlt(_localctx, 2);
         {
-        setState(608);
+        setState(601);
         ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_DATE);
-        setState(611);
-        _errHandler.sync(this);
-        switch ( getInterpreter().adaptivePredict(_input,83,_ctx) ) {
-        case 1:
-          {
-          setState(609);
-          match(T__0);
-          setState(610);
-          match(T__1);
-          }
-          break;
-        }
         }
         break;
       case CURRENT_TIME:
         enterOuterAlt(_localctx, 3);
         {
-        setState(613);
+        setState(602);
         ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIME);
-        setState(619);
-        _errHandler.sync(this);
-        switch ( getInterpreter().adaptivePredict(_input,85,_ctx) ) {
-        case 1:
-          {
-          setState(614);
-          match(T__0);
-          setState(616);
-          _la = _input.LA(1);
-          if (_la==INTEGER_VALUE) {
-            {
-            setState(615);
-            ((BuiltinDateTimeFunctionContext)_localctx).precision = match(INTEGER_VALUE);
-            }
-          }
-
-          setState(618);
-          match(T__1);
-          }
-          break;
-        }
         }
         break;
       default:
@@ -4492,42 +4461,42 @@ class SqlBaseParser extends Parser {
     CastExpressionContext _localctx = new CastExpressionContext(_ctx, getState());
     enterRule(_localctx, 64, RULE_castExpression);
     try {
-      setState(633);
+      setState(615);
       _errHandler.sync(this);
-      switch ( getInterpreter().adaptivePredict(_input,87,_ctx) ) {
+      switch ( getInterpreter().adaptivePredict(_input,82,_ctx) ) {
       case 1:
         enterOuterAlt(_localctx, 1);
         {
-        setState(623);
+        setState(605);
         castTemplate();
         }
         break;
       case 2:
         enterOuterAlt(_localctx, 2);
         {
-        setState(624);
+        setState(606);
         match(FUNCTION_ESC);
-        setState(625);
+        setState(607);
         castTemplate();
-        setState(626);
+        setState(608);
         match(ESC_END);
         }
         break;
       case 3:
         enterOuterAlt(_localctx, 3);
         {
-        setState(628);
+        setState(610);
         convertTemplate();
         }
         break;
       case 4:
         enterOuterAlt(_localctx, 4);
         {
-        setState(629);
+        setState(611);
         match(FUNCTION_ESC);
-        setState(630);
+        setState(612);
         convertTemplate();
-        setState(631);
+        setState(613);
         match(ESC_END);
         }
         break;
@@ -4578,17 +4547,17 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(635);
+      setState(617);
       match(CAST);
-      setState(636);
+      setState(618);
       match(T__0);
-      setState(637);
+      setState(619);
       expression();
-      setState(638);
+      setState(620);
       match(AS);
-      setState(639);
+      setState(621);
       dataType();
-      setState(640);
+      setState(622);
       match(T__1);
       }
     }
@@ -4636,17 +4605,17 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(642);
+      setState(624);
       match(CONVERT);
-      setState(643);
+      setState(625);
       match(T__0);
-      setState(644);
+      setState(626);
       expression();
-      setState(645);
+      setState(627);
       match(T__2);
-      setState(646);
+      setState(628);
       dataType();
-      setState(647);
+      setState(629);
       match(T__1);
       }
     }
@@ -4690,23 +4659,23 @@ class SqlBaseParser extends Parser {
     ExtractExpressionContext _localctx = new ExtractExpressionContext(_ctx, getState());
     enterRule(_localctx, 70, RULE_extractExpression);
     try {
-      setState(654);
+      setState(636);
       switch (_input.LA(1)) {
       case EXTRACT:
         enterOuterAlt(_localctx, 1);
         {
-        setState(649);
+        setState(631);
         extractTemplate();
         }
         break;
       case FUNCTION_ESC:
         enterOuterAlt(_localctx, 2);
         {
-        setState(650);
+        setState(632);
         match(FUNCTION_ESC);
-        setState(651);
+        setState(633);
         extractTemplate();
-        setState(652);
+        setState(634);
         match(ESC_END);
         }
         break;
@@ -4760,17 +4729,17 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(656);
+      setState(638);
       match(EXTRACT);
-      setState(657);
+      setState(639);
       match(T__0);
-      setState(658);
+      setState(640);
       ((ExtractTemplateContext)_localctx).field = identifier();
-      setState(659);
+      setState(641);
       match(FROM);
-      setState(660);
+      setState(642);
       valueExpression(0);
-      setState(661);
+      setState(643);
       match(T__1);
       }
     }
@@ -4813,12 +4782,15 @@ class SqlBaseParser extends Parser {
     FunctionExpressionContext _localctx = new FunctionExpressionContext(_ctx, getState());
     enterRule(_localctx, 74, RULE_functionExpression);
     try {
-      setState(668);
+      setState(650);
       switch (_input.LA(1)) {
       case ANALYZE:
       case ANALYZED:
       case CATALOGS:
       case COLUMNS:
+      case CURRENT_DATE:
+      case CURRENT_TIME:
+      case CURRENT_TIMESTAMP:
       case DAY:
       case DEBUG:
       case EXECUTABLE:
@@ -4859,18 +4831,18 @@ class SqlBaseParser extends Parser {
       case BACKQUOTED_IDENTIFIER:
         enterOuterAlt(_localctx, 1);
         {
-        setState(663);
+        setState(645);
         functionTemplate();
         }
         break;
       case FUNCTION_ESC:
         enterOuterAlt(_localctx, 2);
         {
-        setState(664);
+        setState(646);
         match(FUNCTION_ESC);
-        setState(665);
+        setState(647);
         functionTemplate();
-        setState(666);
+        setState(648);
         match(ESC_END);
         }
         break;
@@ -4928,45 +4900,45 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(670);
+      setState(652);
       functionName();
-      setState(671);
+      setState(653);
       match(T__0);
-      setState(683);
+      setState(665);
       _la = _input.LA(1);
       if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH) | (1L << NOT) | (1L << NULL))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RIGHT - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)) | (1L << (FUNCTION_ESC - 64)) | (1L << (DATE_ESC - 64)) | (1L << (TIME_ESC - 64)) | (1L << (TIMESTAMP_ESC - 64)) | (1L << (GUID_ESC - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) {
         {
-        setState(673);
+        setState(655);
         _la = _input.LA(1);
         if (_la==ALL || _la==DISTINCT) {
           {
-          setState(672);
+          setState(654);
           setQuantifier();
           }
         }
 
-        setState(675);
+        setState(657);
         expression();
-        setState(680);
+        setState(662);
         _errHandler.sync(this);
         _la = _input.LA(1);
         while (_la==T__2) {
           {
           {
-          setState(676);
+          setState(658);
           match(T__2);
-          setState(677);
+          setState(659);
           expression();
           }
           }
-          setState(682);
+          setState(664);
           _errHandler.sync(this);
           _la = _input.LA(1);
         }
         }
       }
 
-      setState(685);
+      setState(667);
       match(T__1);
       }
     }
@@ -5010,19 +4982,19 @@ class SqlBaseParser extends Parser {
     FunctionNameContext _localctx = new FunctionNameContext(_ctx, getState());
     enterRule(_localctx, 78, RULE_functionName);
     try {
-      setState(690);
+      setState(672);
       switch (_input.LA(1)) {
       case LEFT:
         enterOuterAlt(_localctx, 1);
         {
-        setState(687);
+        setState(669);
         match(LEFT);
         }
         break;
       case RIGHT:
         enterOuterAlt(_localctx, 2);
         {
-        setState(688);
+        setState(670);
         match(RIGHT);
         }
         break;
@@ -5030,6 +5002,9 @@ class SqlBaseParser extends Parser {
       case ANALYZED:
       case CATALOGS:
       case COLUMNS:
+      case CURRENT_DATE:
+      case CURRENT_TIME:
+      case CURRENT_TIMESTAMP:
       case DAY:
       case DEBUG:
       case EXECUTABLE:
@@ -5068,7 +5043,7 @@ class SqlBaseParser extends Parser {
       case BACKQUOTED_IDENTIFIER:
         enterOuterAlt(_localctx, 3);
         {
-        setState(689);
+        setState(671);
         identifier();
         }
         break;
@@ -5299,13 +5274,13 @@ class SqlBaseParser extends Parser {
     enterRule(_localctx, 80, RULE_constant);
     try {
       int _alt;
-      setState(718);
+      setState(700);
       switch (_input.LA(1)) {
       case NULL:
         _localctx = new NullLiteralContext(_localctx);
         enterOuterAlt(_localctx, 1);
         {
-        setState(692);
+        setState(674);
         match(NULL);
         }
         break;
@@ -5313,7 +5288,7 @@ class SqlBaseParser extends Parser {
         _localctx = new IntervalLiteralContext(_localctx);
         enterOuterAlt(_localctx, 2);
         {
-        setState(693);
+        setState(675);
         interval();
         }
         break;
@@ -5322,7 +5297,7 @@ class SqlBaseParser extends Parser {
         _localctx = new NumericLiteralContext(_localctx);
         enterOuterAlt(_localctx, 3);
         {
-        setState(694);
+        setState(676);
         number();
         }
         break;
@@ -5331,7 +5306,7 @@ class SqlBaseParser extends Parser {
         _localctx = new BooleanLiteralContext(_localctx);
         enterOuterAlt(_localctx, 4);
         {
-        setState(695);
+        setState(677);
         booleanValue();
         }
         break;
@@ -5339,7 +5314,7 @@ class SqlBaseParser extends Parser {
         _localctx = new StringLiteralContext(_localctx);
         enterOuterAlt(_localctx, 5);
         {
-        setState(697); 
+        setState(679); 
         _errHandler.sync(this);
         _alt = 1;
         do {
@@ -5347,7 +5322,7 @@ class SqlBaseParser extends Parser {
           case 1:
             {
             {
-            setState(696);
+            setState(678);
             match(STRING);
             }
             }
@@ -5355,9 +5330,9 @@ class SqlBaseParser extends Parser {
           default:
             throw new NoViableAltException(this);
           }
-          setState(699); 
+          setState(681); 
           _errHandler.sync(this);
-          _alt = getInterpreter().adaptivePredict(_input,94,_ctx);
+          _alt = getInterpreter().adaptivePredict(_input,89,_ctx);
         } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER );
         }
         break;
@@ -5365,7 +5340,7 @@ class SqlBaseParser extends Parser {
         _localctx = new ParamLiteralContext(_localctx);
         enterOuterAlt(_localctx, 6);
         {
-        setState(701);
+        setState(683);
         match(PARAM);
         }
         break;
@@ -5373,11 +5348,11 @@ class SqlBaseParser extends Parser {
         _localctx = new DateEscapedLiteralContext(_localctx);
         enterOuterAlt(_localctx, 7);
         {
-        setState(702);
+        setState(684);
         match(DATE_ESC);
-        setState(703);
+        setState(685);
         string();
-        setState(704);
+        setState(686);
         match(ESC_END);
         }
         break;
@@ -5385,11 +5360,11 @@ class SqlBaseParser extends Parser {
         _localctx = new TimeEscapedLiteralContext(_localctx);
         enterOuterAlt(_localctx, 8);
         {
-        setState(706);
+        setState(688);
         match(TIME_ESC);
-        setState(707);
+        setState(689);
         string();
-        setState(708);
+        setState(690);
         match(ESC_END);
         }
         break;
@@ -5397,11 +5372,11 @@ class SqlBaseParser extends Parser {
         _localctx = new TimestampEscapedLiteralContext(_localctx);
         enterOuterAlt(_localctx, 9);
         {
-        setState(710);
+        setState(692);
         match(TIMESTAMP_ESC);
-        setState(711);
+        setState(693);
         string();
-        setState(712);
+        setState(694);
         match(ESC_END);
         }
         break;
@@ -5409,11 +5384,11 @@ class SqlBaseParser extends Parser {
         _localctx = new GuidEscapedLiteralContext(_localctx);
         enterOuterAlt(_localctx, 10);
         {
-        setState(714);
+        setState(696);
         match(GUID_ESC);
-        setState(715);
+        setState(697);
         string();
-        setState(716);
+        setState(698);
         match(ESC_END);
         }
         break;
@@ -5466,7 +5441,7 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(720);
+      setState(702);
       _la = _input.LA(1);
       if ( !(((((_la - 101)) & ~0x3f) == 0 && ((1L << (_la - 101)) & ((1L << (EQ - 101)) | (1L << (NULLEQ - 101)) | (1L << (NEQ - 101)) | (1L << (LT - 101)) | (1L << (LTE - 101)) | (1L << (GT - 101)) | (1L << (GTE - 101)))) != 0)) ) {
       _errHandler.recoverInline(this);
@@ -5515,7 +5490,7 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(722);
+      setState(704);
       _la = _input.LA(1);
       if ( !(_la==FALSE || _la==TRUE) ) {
       _errHandler.recoverInline(this);
@@ -5583,13 +5558,13 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(724);
+      setState(706);
       match(INTERVAL);
-      setState(726);
+      setState(708);
       _la = _input.LA(1);
       if (_la==PLUS || _la==MINUS) {
         {
-        setState(725);
+        setState(707);
         ((IntervalContext)_localctx).sign = _input.LT(1);
         _la = _input.LA(1);
         if ( !(_la==PLUS || _la==MINUS) ) {
@@ -5600,35 +5575,35 @@ class SqlBaseParser extends Parser {
         }
       }
 
-      setState(730);
+      setState(712);
       switch (_input.LA(1)) {
       case INTEGER_VALUE:
       case DECIMAL_VALUE:
         {
-        setState(728);
+        setState(710);
         ((IntervalContext)_localctx).valueNumeric = number();
         }
         break;
       case PARAM:
       case STRING:
         {
-        setState(729);
+        setState(711);
         ((IntervalContext)_localctx).valuePattern = string();
         }
         break;
       default:
         throw new NoViableAltException(this);
       }
-      setState(732);
+      setState(714);
       ((IntervalContext)_localctx).leading = intervalField();
-      setState(735);
+      setState(717);
       _errHandler.sync(this);
-      switch ( getInterpreter().adaptivePredict(_input,98,_ctx) ) {
+      switch ( getInterpreter().adaptivePredict(_input,93,_ctx) ) {
       case 1:
         {
-        setState(733);
+        setState(715);
         match(TO);
-        setState(734);
+        setState(716);
         ((IntervalContext)_localctx).trailing = intervalField();
         }
         break;
@@ -5685,7 +5660,7 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(737);
+      setState(719);
       _la = _input.LA(1);
       if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << DAY) | (1L << DAYS) | (1L << HOUR) | (1L << HOURS) | (1L << MINUTE) | (1L << MINUTES) | (1L << MONTH) | (1L << MONTHS))) != 0) || ((((_la - 75)) & ~0x3f) == 0 && ((1L << (_la - 75)) & ((1L << (SECOND - 75)) | (1L << (SECONDS - 75)) | (1L << (YEAR - 75)) | (1L << (YEARS - 75)))) != 0)) ) {
       _errHandler.recoverInline(this);
@@ -5743,7 +5718,7 @@ class SqlBaseParser extends Parser {
       _localctx = new PrimitiveDataTypeContext(_localctx);
       enterOuterAlt(_localctx, 1);
       {
-      setState(739);
+      setState(721);
       identifier();
       }
     }
@@ -5795,25 +5770,25 @@ class SqlBaseParser extends Parser {
       int _alt;
       enterOuterAlt(_localctx, 1);
       {
-      setState(746);
+      setState(728);
       _errHandler.sync(this);
-      _alt = getInterpreter().adaptivePredict(_input,99,_ctx);
+      _alt = getInterpreter().adaptivePredict(_input,94,_ctx);
       while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
         if ( _alt==1 ) {
           {
           {
-          setState(741);
+          setState(723);
           identifier();
-          setState(742);
+          setState(724);
           match(DOT);
           }
           } 
         }
-        setState(748);
+        setState(730);
         _errHandler.sync(this);
-        _alt = getInterpreter().adaptivePredict(_input,99,_ctx);
+        _alt = getInterpreter().adaptivePredict(_input,94,_ctx);
       }
-      setState(749);
+      setState(731);
       identifier();
       }
     }
@@ -5858,13 +5833,13 @@ class SqlBaseParser extends Parser {
     IdentifierContext _localctx = new IdentifierContext(_ctx, getState());
     enterRule(_localctx, 94, RULE_identifier);
     try {
-      setState(753);
+      setState(735);
       switch (_input.LA(1)) {
       case QUOTED_IDENTIFIER:
       case BACKQUOTED_IDENTIFIER:
         enterOuterAlt(_localctx, 1);
         {
-        setState(751);
+        setState(733);
         quoteIdentifier();
         }
         break;
@@ -5872,6 +5847,9 @@ class SqlBaseParser extends Parser {
       case ANALYZED:
       case CATALOGS:
       case COLUMNS:
+      case CURRENT_DATE:
+      case CURRENT_TIME:
+      case CURRENT_TIMESTAMP:
       case DAY:
       case DEBUG:
       case EXECUTABLE:
@@ -5908,7 +5886,7 @@ class SqlBaseParser extends Parser {
       case DIGIT_IDENTIFIER:
         enterOuterAlt(_localctx, 2);
         {
-        setState(752);
+        setState(734);
         unquoteIdentifier();
         }
         break;
@@ -5961,43 +5939,43 @@ class SqlBaseParser extends Parser {
     enterRule(_localctx, 96, RULE_tableIdentifier);
     int _la;
     try {
-      setState(767);
+      setState(749);
       _errHandler.sync(this);
-      switch ( getInterpreter().adaptivePredict(_input,103,_ctx) ) {
+      switch ( getInterpreter().adaptivePredict(_input,98,_ctx) ) {
       case 1:
         enterOuterAlt(_localctx, 1);
         {
-        setState(758);
+        setState(740);
         _la = _input.LA(1);
-        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) {
+        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) {
           {
-          setState(755);
+          setState(737);
           ((TableIdentifierContext)_localctx).catalog = identifier();
-          setState(756);
+          setState(738);
           match(T__3);
           }
         }
 
-        setState(760);
+        setState(742);
         match(TABLE_IDENTIFIER);
         }
         break;
       case 2:
         enterOuterAlt(_localctx, 2);
         {
-        setState(764);
+        setState(746);
         _errHandler.sync(this);
-        switch ( getInterpreter().adaptivePredict(_input,102,_ctx) ) {
+        switch ( getInterpreter().adaptivePredict(_input,97,_ctx) ) {
         case 1:
           {
-          setState(761);
+          setState(743);
           ((TableIdentifierContext)_localctx).catalog = identifier();
-          setState(762);
+          setState(744);
           match(T__3);
           }
           break;
         }
-        setState(766);
+        setState(748);
         ((TableIdentifierContext)_localctx).name = identifier();
         }
         break;
@@ -6064,13 +6042,13 @@ class SqlBaseParser extends Parser {
     QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState());
     enterRule(_localctx, 98, RULE_quoteIdentifier);
     try {
-      setState(771);
+      setState(753);
       switch (_input.LA(1)) {
       case QUOTED_IDENTIFIER:
         _localctx = new QuotedIdentifierContext(_localctx);
         enterOuterAlt(_localctx, 1);
         {
-        setState(769);
+        setState(751);
         match(QUOTED_IDENTIFIER);
         }
         break;
@@ -6078,7 +6056,7 @@ class SqlBaseParser extends Parser {
         _localctx = new BackQuotedIdentifierContext(_localctx);
         enterOuterAlt(_localctx, 2);
         {
-        setState(770);
+        setState(752);
         match(BACKQUOTED_IDENTIFIER);
         }
         break;
@@ -6150,13 +6128,13 @@ class SqlBaseParser extends Parser {
     UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState());
     enterRule(_localctx, 100, RULE_unquoteIdentifier);
     try {
-      setState(776);
+      setState(758);
       switch (_input.LA(1)) {
       case IDENTIFIER:
         _localctx = new UnquotedIdentifierContext(_localctx);
         enterOuterAlt(_localctx, 1);
         {
-        setState(773);
+        setState(755);
         match(IDENTIFIER);
         }
         break;
@@ -6164,6 +6142,9 @@ class SqlBaseParser extends Parser {
       case ANALYZED:
       case CATALOGS:
       case COLUMNS:
+      case CURRENT_DATE:
+      case CURRENT_TIME:
+      case CURRENT_TIMESTAMP:
       case DAY:
       case DEBUG:
       case EXECUTABLE:
@@ -6199,7 +6180,7 @@ class SqlBaseParser extends Parser {
         _localctx = new UnquotedIdentifierContext(_localctx);
         enterOuterAlt(_localctx, 2);
         {
-        setState(774);
+        setState(756);
         nonReserved();
         }
         break;
@@ -6207,7 +6188,7 @@ class SqlBaseParser extends Parser {
         _localctx = new DigitIdentifierContext(_localctx);
         enterOuterAlt(_localctx, 3);
         {
-        setState(775);
+        setState(757);
         match(DIGIT_IDENTIFIER);
         }
         break;
@@ -6276,13 +6257,13 @@ class SqlBaseParser extends Parser {
     NumberContext _localctx = new NumberContext(_ctx, getState());
     enterRule(_localctx, 102, RULE_number);
     try {
-      setState(780);
+      setState(762);
       switch (_input.LA(1)) {
       case DECIMAL_VALUE:
         _localctx = new DecimalLiteralContext(_localctx);
         enterOuterAlt(_localctx, 1);
         {
-        setState(778);
+        setState(760);
         match(DECIMAL_VALUE);
         }
         break;
@@ -6290,7 +6271,7 @@ class SqlBaseParser extends Parser {
         _localctx = new IntegerLiteralContext(_localctx);
         enterOuterAlt(_localctx, 2);
         {
-        setState(779);
+        setState(761);
         match(INTEGER_VALUE);
         }
         break;
@@ -6338,7 +6319,7 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(782);
+      setState(764);
       _la = _input.LA(1);
       if ( !(_la==PARAM || _la==STRING) ) {
       _errHandler.recoverInline(this);
@@ -6363,6 +6344,9 @@ class SqlBaseParser extends Parser {
     public TerminalNode ANALYZED() { return getToken(SqlBaseParser.ANALYZED, 0); }
     public TerminalNode CATALOGS() { return getToken(SqlBaseParser.CATALOGS, 0); }
     public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); }
+    public TerminalNode CURRENT_DATE() { return getToken(SqlBaseParser.CURRENT_DATE, 0); }
+    public TerminalNode CURRENT_TIME() { return getToken(SqlBaseParser.CURRENT_TIME, 0); }
+    public TerminalNode CURRENT_TIMESTAMP() { return getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0); }
     public TerminalNode DAY() { return getToken(SqlBaseParser.DAY, 0); }
     public TerminalNode DEBUG() { return getToken(SqlBaseParser.DEBUG, 0); }
     public TerminalNode EXECUTABLE() { return getToken(SqlBaseParser.EXECUTABLE, 0); }
@@ -6421,9 +6405,9 @@ class SqlBaseParser extends Parser {
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(784);
+      setState(766);
       _la = _input.LA(1);
-      if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)))) != 0)) ) {
+      if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OPTIMIZED - 64)) | (1L << (PARSED - 64)) | (1L << (PHYSICAL - 64)) | (1L << (PLAN - 64)) | (1L << (RLIKE - 64)) | (1L << (QUERY - 64)) | (1L << (SCHEMAS - 64)) | (1L << (SECOND - 64)) | (1L << (SHOW - 64)) | (1L << (SYS - 64)) | (1L << (TABLES - 64)) | (1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (YEAR - 64)))) != 0)) ) {
       _errHandler.recoverInline(this);
       } else {
         consume();
@@ -6481,7 +6465,7 @@ class SqlBaseParser extends Parser {
   }
 
   public static final String _serializedATN =
-    "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u0083\u0315\4\2\t"+
+    "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u0083\u0303\4\2\t"+
     "\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+
     "\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+
     "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+
@@ -6528,115 +6512,114 @@ class SqlBaseParser extends Parser {
     "\n\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\7\37\u0237\n\37"+
     "\f\37\16\37\u023a\13\37\3 \3 \3 \3 \3 \3 \3 \3 \5 \u0244\n \3 \3 \3 \3"+
     " \3 \3 \3 \3 \3 \3 \3 \5 \u0251\n \3 \3 \3 \7 \u0256\n \f \16 \u0259\13"+
-    " \3!\3!\3!\5!\u025e\n!\3!\5!\u0261\n!\3!\3!\3!\5!\u0266\n!\3!\3!\3!\5"+
-    "!\u026b\n!\3!\5!\u026e\n!\5!\u0270\n!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\""+
-    "\3\"\3\"\5\"\u027c\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3%\3"+
-    "%\3%\3%\3%\5%\u0291\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\5\'\u029f"+
-    "\n\'\3(\3(\3(\5(\u02a4\n(\3(\3(\3(\7(\u02a9\n(\f(\16(\u02ac\13(\5(\u02ae"+
-    "\n(\3(\3(\3)\3)\3)\5)\u02b5\n)\3*\3*\3*\3*\3*\6*\u02bc\n*\r*\16*\u02bd"+
-    "\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\5*\u02d1\n*\3+\3+"+
-    "\3,\3,\3-\3-\5-\u02d9\n-\3-\3-\5-\u02dd\n-\3-\3-\3-\5-\u02e2\n-\3.\3."+
-    "\3/\3/\3\60\3\60\3\60\7\60\u02eb\n\60\f\60\16\60\u02ee\13\60\3\60\3\60"+
-    "\3\61\3\61\5\61\u02f4\n\61\3\62\3\62\3\62\5\62\u02f9\n\62\3\62\3\62\3"+
-    "\62\3\62\5\62\u02ff\n\62\3\62\5\62\u0302\n\62\3\63\3\63\5\63\u0306\n\63"+
-    "\3\64\3\64\3\64\5\64\u030b\n\64\3\65\3\65\5\65\u030f\n\65\3\66\3\66\3"+
-    "\67\3\67\3\67\2\5.<>8\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,."+
-    "\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`bdfhjl\2\22\b\2\7\7\t\t\37\37\67\67"+
-    "BBFF\4\2))TT\4\2\t\tBB\4\2&&..\3\2\33\34\3\2no\4\2\7\7xx\4\2\r\r\33\33"+
-    "\4\2$$\63\63\4\2\7\7\35\35\3\2pr\3\2gm\4\2##UU\7\2\30\31,-9<MN]^\3\2v"+
-    "w\30\2\b\t\22\23\30\30\32\32\37\37!!$%\'),,\60\60\63\63\66\6799;;BBFH"+
-    "JMPQSTWXZZ]]\u0376\2n\3\2\2\2\4q\3\2\2\2\6\u00d9\3\2\2\2\b\u00e4\3\2\2"+
-    "\2\n\u00e8\3\2\2\2\f\u00fd\3\2\2\2\16\u0104\3\2\2\2\20\u0106\3\2\2\2\22"+
-    "\u010e\3\2\2\2\24\u012a\3\2\2\2\26\u0134\3\2\2\2\30\u013e\3\2\2\2\32\u014d"+
-    "\3\2\2\2\34\u014f\3\2\2\2\36\u0155\3\2\2\2 \u0157\3\2\2\2\"\u015e\3\2"+
-    "\2\2$\u0170\3\2\2\2&\u0181\3\2\2\2(\u0191\3\2\2\2*\u01ac\3\2\2\2,\u01ae"+
-    "\3\2\2\2.\u01cf\3\2\2\2\60\u01e0\3\2\2\2\62\u01e3\3\2\2\2\64\u0215\3\2"+
-    "\2\2\66\u0217\3\2\2\28\u021a\3\2\2\2:\u0224\3\2\2\2<\u022a\3\2\2\2>\u0250"+
-    "\3\2\2\2@\u026f\3\2\2\2B\u027b\3\2\2\2D\u027d\3\2\2\2F\u0284\3\2\2\2H"+
-    "\u0290\3\2\2\2J\u0292\3\2\2\2L\u029e\3\2\2\2N\u02a0\3\2\2\2P\u02b4\3\2"+
-    "\2\2R\u02d0\3\2\2\2T\u02d2\3\2\2\2V\u02d4\3\2\2\2X\u02d6\3\2\2\2Z\u02e3"+
-    "\3\2\2\2\\\u02e5\3\2\2\2^\u02ec\3\2\2\2`\u02f3\3\2\2\2b\u0301\3\2\2\2"+
-    "d\u0305\3\2\2\2f\u030a\3\2\2\2h\u030e\3\2\2\2j\u0310\3\2\2\2l\u0312\3"+
-    "\2\2\2no\5\6\4\2op\7\2\2\3p\3\3\2\2\2qr\5,\27\2rs\7\2\2\3s\5\3\2\2\2t"+
-    "\u00da\5\b\5\2u\u0083\7!\2\2v\177\7\3\2\2wx\7H\2\2x~\t\2\2\2yz\7%\2\2"+
-    "z~\t\3\2\2{|\7Z\2\2|~\5V,\2}w\3\2\2\2}y\3\2\2\2}{\3\2\2\2~\u0081\3\2\2"+
-    "\2\177}\3\2\2\2\177\u0080\3\2\2\2\u0080\u0082\3\2\2\2\u0081\177\3\2\2"+
-    "\2\u0082\u0084\7\4\2\2\u0083v\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0085"+
-    "\3\2\2\2\u0085\u00da\5\6\4\2\u0086\u0092\7\32\2\2\u0087\u008e\7\3\2\2"+
-    "\u0088\u0089\7H\2\2\u0089\u008d\t\4\2\2\u008a\u008b\7%\2\2\u008b\u008d"+
-    "\t\3\2\2\u008c\u0088\3\2\2\2\u008c\u008a\3\2\2\2\u008d\u0090\3\2\2\2\u008e"+
-    "\u008c\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0091\3\2\2\2\u0090\u008e\3\2"+
-    "\2\2\u0091\u0093\7\4\2\2\u0092\u0087\3\2\2\2\u0092\u0093\3\2\2\2\u0093"+
-    "\u0094\3\2\2\2\u0094\u00da\5\6\4\2\u0095\u0096\7P\2\2\u0096\u0099\7S\2"+
-    "\2\u0097\u009a\5\66\34\2\u0098\u009a\5b\62\2\u0099\u0097\3\2\2\2\u0099"+
-    "\u0098\3\2\2\2\u0099\u009a\3\2\2\2\u009a\u00da\3\2\2\2\u009b\u009c\7P"+
-    "\2\2\u009c\u009d\7\23\2\2\u009d\u00a0\t\5\2\2\u009e\u00a1\5\66\34\2\u009f"+
-    "\u00a1\5b\62\2\u00a0\u009e\3\2\2\2\u00a0\u009f\3\2\2\2\u00a1\u00da\3\2"+
-    "\2\2\u00a2\u00a5\t\6\2\2\u00a3\u00a6\5\66\34\2\u00a4\u00a6\5b\62\2\u00a5"+
-    "\u00a3\3\2\2\2\u00a5\u00a4\3\2\2\2\u00a6\u00da\3\2\2\2\u00a7\u00a8\7P"+
-    "\2\2\u00a8\u00aa\7(\2\2\u00a9\u00ab\5\66\34\2\u00aa\u00a9\3\2\2\2\u00aa"+
-    "\u00ab\3\2\2\2\u00ab\u00da\3\2\2\2\u00ac\u00ad\7P\2\2\u00ad\u00da\7L\2"+
-    "\2\u00ae\u00af\7Q\2\2\u00af\u00b2\7S\2\2\u00b0\u00b1\7\21\2\2\u00b1\u00b3"+
-    "\5\66\34\2\u00b2\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00b6\3\2\2\2"+
-    "\u00b4\u00b7\5\66\34\2\u00b5\u00b7\5b\62\2\u00b6\u00b4\3\2\2\2\u00b6\u00b5"+
-    "\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00c1\3\2\2\2\u00b8\u00b9\7W\2\2\u00b9"+
-    "\u00be\5j\66\2\u00ba\u00bb\7\5\2\2\u00bb\u00bd\5j\66\2\u00bc\u00ba\3\2"+
-    "\2\2\u00bd\u00c0\3\2\2\2\u00be\u00bc\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf"+
-    "\u00c2\3\2\2\2\u00c0\u00be\3\2\2\2\u00c1\u00b8\3\2\2\2\u00c1\u00c2\3\2"+
-    "\2\2\u00c2\u00da\3\2\2\2\u00c3\u00c4\7Q\2\2\u00c4\u00c7\7\23\2\2\u00c5"+
-    "\u00c6\7\21\2\2\u00c6\u00c8\5j\66\2\u00c7\u00c5\3\2\2\2\u00c7\u00c8\3"+
-    "\2\2\2\u00c8\u00cc\3\2\2\2\u00c9\u00ca\7R\2\2\u00ca\u00cd\5\66\34\2\u00cb"+
-    "\u00cd\5b\62\2\u00cc\u00c9\3\2\2\2\u00cc\u00cb\3\2\2\2\u00cc\u00cd\3\2"+
-    "\2\2\u00cd\u00cf\3\2\2\2\u00ce\u00d0\5\66\34\2\u00cf\u00ce\3\2\2\2\u00cf"+
-    "\u00d0\3\2\2\2\u00d0\u00da\3\2\2\2\u00d1\u00d2\7Q\2\2\u00d2\u00d7\7X\2"+
-    "\2\u00d3\u00d5\t\7\2\2\u00d4\u00d3\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\u00d6"+
-    "\3\2\2\2\u00d6\u00d8\5h\65\2\u00d7\u00d4\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8"+
-    "\u00da\3\2\2\2\u00d9t\3\2\2\2\u00d9u\3\2\2\2\u00d9\u0086\3\2\2\2\u00d9"+
-    "\u0095\3\2\2\2\u00d9\u009b\3\2\2\2\u00d9\u00a2\3\2\2\2\u00d9\u00a7\3\2"+
-    "\2\2\u00d9\u00ac\3\2\2\2\u00d9\u00ae\3\2\2\2\u00d9\u00c3\3\2\2\2\u00d9"+
-    "\u00d1\3\2\2\2\u00da\7\3\2\2\2\u00db\u00dc\7\\\2\2\u00dc\u00e1\5\34\17"+
-    "\2\u00dd\u00de\7\5\2\2\u00de\u00e0\5\34\17\2\u00df\u00dd\3\2\2\2\u00e0"+
-    "\u00e3\3\2\2\2\u00e1\u00df\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2\u00e5\3\2"+
-    "\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00db\3\2\2\2\u00e4\u00e5\3\2\2\2\u00e5"+
-    "\u00e6\3\2\2\2\u00e6\u00e7\5\n\6\2\u00e7\t\3\2\2\2\u00e8\u00f3\5\16\b"+
-    "\2\u00e9\u00ea\7D\2\2\u00ea\u00eb\7\17\2\2\u00eb\u00f0\5\20\t\2\u00ec"+
-    "\u00ed\7\5\2\2\u00ed\u00ef\5\20\t\2\u00ee\u00ec\3\2\2\2\u00ef\u00f2\3"+
-    "\2\2\2\u00f0\u00ee\3\2\2\2\u00f0\u00f1\3\2\2\2\u00f1\u00f4\3\2\2\2\u00f2"+
-    "\u00f0\3\2\2\2\u00f3\u00e9\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4\u00f6\3\2"+
-    "\2\2\u00f5\u00f7\5\f\7\2\u00f6\u00f5\3\2\2\2\u00f6\u00f7\3\2\2\2\u00f7"+
-    "\13\3\2\2\2\u00f8\u00f9\7\66\2\2\u00f9\u00fe\t\b\2\2\u00fa\u00fb\7a\2"+
-    "\2\u00fb\u00fc\t\b\2\2\u00fc\u00fe\7f\2\2\u00fd\u00f8\3\2\2\2\u00fd\u00fa"+
-    "\3\2\2\2\u00fe\r\3\2\2\2\u00ff\u0105\5\22\n\2\u0100\u0101\7\3\2\2\u0101"+
-    "\u0102\5\n\6\2\u0102\u0103\7\4\2\2\u0103\u0105\3\2\2\2\u0104\u00ff\3\2"+
-    "\2\2\u0104\u0100\3\2\2\2\u0105\17\3\2\2\2\u0106\u0108\5,\27\2\u0107\u0109"+
-    "\t\t\2\2\u0108\u0107\3\2\2\2\u0108\u0109\3\2\2\2\u0109\u010c\3\2\2\2\u010a"+
-    "\u010b\7@\2\2\u010b\u010d\t\n\2\2\u010c\u010a\3\2\2\2\u010c\u010d\3\2"+
-    "\2\2\u010d\21\3\2\2\2\u010e\u0110\7O\2\2\u010f\u0111\5\36\20\2\u0110\u010f"+
-    "\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u0112\3\2\2\2\u0112\u0117\5 \21\2\u0113"+
-    "\u0114\7\5\2\2\u0114\u0116\5 \21\2\u0115\u0113\3\2\2\2\u0116\u0119\3\2"+
-    "\2\2\u0117\u0115\3\2\2\2\u0117\u0118\3\2\2\2\u0118\u011b\3\2\2\2\u0119"+
-    "\u0117\3\2\2\2\u011a\u011c\5\24\13\2\u011b\u011a\3\2\2\2\u011b\u011c\3"+
-    "\2\2\2\u011c\u011f\3\2\2\2\u011d\u011e\7[\2\2\u011e\u0120\5.\30\2\u011f"+
-    "\u011d\3\2\2\2\u011f\u0120\3\2\2\2\u0120\u0124\3\2\2\2\u0121\u0122\7*"+
-    "\2\2\u0122\u0123\7\17\2\2\u0123\u0125\5\26\f\2\u0124\u0121\3\2\2\2\u0124"+
-    "\u0125\3\2\2\2\u0125\u0128\3\2\2\2\u0126\u0127\7+\2\2\u0127\u0129\5.\30"+
-    "\2\u0128\u0126\3\2\2\2\u0128\u0129\3\2\2\2\u0129\23\3\2\2\2\u012a\u012b"+
-    "\7&\2\2\u012b\u0130\5\"\22\2\u012c\u012d\7\5\2\2\u012d\u012f\5\"\22\2"+
-    "\u012e\u012c\3\2\2\2\u012f\u0132\3\2\2\2\u0130\u012e\3\2\2\2\u0130\u0131"+
-    "\3\2\2\2\u0131\25\3\2\2\2\u0132\u0130\3\2\2\2\u0133\u0135\5\36\20\2\u0134"+
-    "\u0133\3\2\2\2\u0134\u0135\3\2\2\2\u0135\u0136\3\2\2\2\u0136\u013b\5\30"+
-    "\r\2\u0137\u0138\7\5\2\2\u0138\u013a\5\30\r\2\u0139\u0137\3\2\2\2\u013a"+
-    "\u013d\3\2\2\2\u013b\u0139\3\2\2\2\u013b\u013c\3\2\2\2\u013c\27\3\2\2"+
-    "\2\u013d\u013b\3\2\2\2\u013e\u013f\5\32\16\2\u013f\31\3\2\2\2\u0140\u0149"+
-    "\7\3\2\2\u0141\u0146\5,\27\2\u0142\u0143\7\5\2\2\u0143\u0145\5,\27\2\u0144"+
-    "\u0142\3\2\2\2\u0145\u0148\3\2\2\2\u0146\u0144\3\2\2\2\u0146\u0147\3\2"+
-    "\2\2\u0147\u014a\3\2\2\2\u0148\u0146\3\2\2\2\u0149\u0141\3\2\2\2\u0149"+
-    "\u014a\3\2\2\2\u014a\u014b\3\2\2\2\u014b\u014e\7\4\2\2\u014c\u014e\5,"+
-    "\27\2\u014d\u0140\3\2\2\2\u014d\u014c\3\2\2\2\u014e\33\3\2\2\2\u014f\u0150"+
-    "\5`\61\2\u0150\u0151\7\f\2\2\u0151\u0152\7\3\2\2\u0152\u0153\5\n\6\2\u0153"+
-    "\u0154\7\4\2\2\u0154\35\3\2\2\2\u0155\u0156\t\13\2\2\u0156\37\3\2\2\2"+
-    "\u0157\u015c\5,\27\2\u0158\u015a\7\f\2\2\u0159\u0158\3\2\2\2\u0159\u015a"+
-    "\3\2\2\2\u015a\u015b\3\2\2\2\u015b\u015d\5`\61\2\u015c\u0159\3\2\2\2\u015c"+
+    " \3!\3!\3!\5!\u025e\n!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u026a"+
+    "\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\5%\u027f"+
+    "\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\5\'\u028d\n\'\3(\3(\3(\5"+
+    "(\u0292\n(\3(\3(\3(\7(\u0297\n(\f(\16(\u029a\13(\5(\u029c\n(\3(\3(\3)"+
+    "\3)\3)\5)\u02a3\n)\3*\3*\3*\3*\3*\6*\u02aa\n*\r*\16*\u02ab\3*\3*\3*\3"+
+    "*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\5*\u02bf\n*\3+\3+\3,\3,\3-\3"+
+    "-\5-\u02c7\n-\3-\3-\5-\u02cb\n-\3-\3-\3-\5-\u02d0\n-\3.\3.\3/\3/\3\60"+
+    "\3\60\3\60\7\60\u02d9\n\60\f\60\16\60\u02dc\13\60\3\60\3\60\3\61\3\61"+
+    "\5\61\u02e2\n\61\3\62\3\62\3\62\5\62\u02e7\n\62\3\62\3\62\3\62\3\62\5"+
+    "\62\u02ed\n\62\3\62\5\62\u02f0\n\62\3\63\3\63\5\63\u02f4\n\63\3\64\3\64"+
+    "\3\64\5\64\u02f9\n\64\3\65\3\65\5\65\u02fd\n\65\3\66\3\66\3\67\3\67\3"+
+    "\67\2\5.<>8\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\66"+
+    "8:<>@BDFHJLNPRTVXZ\\^`bdfhjl\2\22\b\2\7\7\t\t\37\37\67\67BBFF\4\2))TT"+
+    "\4\2\t\tBB\4\2&&..\3\2\33\34\3\2no\4\2\7\7xx\4\2\r\r\33\33\4\2$$\63\63"+
+    "\4\2\7\7\35\35\3\2pr\3\2gm\4\2##UU\7\2\30\31,-9<MN]^\3\2vw\30\2\b\t\22"+
+    "\23\25\30\32\32\37\37!!$%\'),,\60\60\63\63\66\6799;;BBFHJMPQSTWXZZ]]\u035f"+
+    "\2n\3\2\2\2\4q\3\2\2\2\6\u00d9\3\2\2\2\b\u00e4\3\2\2\2\n\u00e8\3\2\2\2"+
+    "\f\u00fd\3\2\2\2\16\u0104\3\2\2\2\20\u0106\3\2\2\2\22\u010e\3\2\2\2\24"+
+    "\u012a\3\2\2\2\26\u0134\3\2\2\2\30\u013e\3\2\2\2\32\u014d\3\2\2\2\34\u014f"+
+    "\3\2\2\2\36\u0155\3\2\2\2 \u0157\3\2\2\2\"\u015e\3\2\2\2$\u0170\3\2\2"+
+    "\2&\u0181\3\2\2\2(\u0191\3\2\2\2*\u01ac\3\2\2\2,\u01ae\3\2\2\2.\u01cf"+
+    "\3\2\2\2\60\u01e0\3\2\2\2\62\u01e3\3\2\2\2\64\u0215\3\2\2\2\66\u0217\3"+
+    "\2\2\28\u021a\3\2\2\2:\u0224\3\2\2\2<\u022a\3\2\2\2>\u0250\3\2\2\2@\u025d"+
+    "\3\2\2\2B\u0269\3\2\2\2D\u026b\3\2\2\2F\u0272\3\2\2\2H\u027e\3\2\2\2J"+
+    "\u0280\3\2\2\2L\u028c\3\2\2\2N\u028e\3\2\2\2P\u02a2\3\2\2\2R\u02be\3\2"+
+    "\2\2T\u02c0\3\2\2\2V\u02c2\3\2\2\2X\u02c4\3\2\2\2Z\u02d1\3\2\2\2\\\u02d3"+
+    "\3\2\2\2^\u02da\3\2\2\2`\u02e1\3\2\2\2b\u02ef\3\2\2\2d\u02f3\3\2\2\2f"+
+    "\u02f8\3\2\2\2h\u02fc\3\2\2\2j\u02fe\3\2\2\2l\u0300\3\2\2\2no\5\6\4\2"+
+    "op\7\2\2\3p\3\3\2\2\2qr\5,\27\2rs\7\2\2\3s\5\3\2\2\2t\u00da\5\b\5\2u\u0083"+
+    "\7!\2\2v\177\7\3\2\2wx\7H\2\2x~\t\2\2\2yz\7%\2\2z~\t\3\2\2{|\7Z\2\2|~"+
+    "\5V,\2}w\3\2\2\2}y\3\2\2\2}{\3\2\2\2~\u0081\3\2\2\2\177}\3\2\2\2\177\u0080"+
+    "\3\2\2\2\u0080\u0082\3\2\2\2\u0081\177\3\2\2\2\u0082\u0084\7\4\2\2\u0083"+
+    "v\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u00da\5\6\4\2"+
+    "\u0086\u0092\7\32\2\2\u0087\u008e\7\3\2\2\u0088\u0089\7H\2\2\u0089\u008d"+
+    "\t\4\2\2\u008a\u008b\7%\2\2\u008b\u008d\t\3\2\2\u008c\u0088\3\2\2\2\u008c"+
+    "\u008a\3\2\2\2\u008d\u0090\3\2\2\2\u008e\u008c\3\2\2\2\u008e\u008f\3\2"+
+    "\2\2\u008f\u0091\3\2\2\2\u0090\u008e\3\2\2\2\u0091\u0093\7\4\2\2\u0092"+
+    "\u0087\3\2\2\2\u0092\u0093\3\2\2\2\u0093\u0094\3\2\2\2\u0094\u00da\5\6"+
+    "\4\2\u0095\u0096\7P\2\2\u0096\u0099\7S\2\2\u0097\u009a\5\66\34\2\u0098"+
+    "\u009a\5b\62\2\u0099\u0097\3\2\2\2\u0099\u0098\3\2\2\2\u0099\u009a\3\2"+
+    "\2\2\u009a\u00da\3\2\2\2\u009b\u009c\7P\2\2\u009c\u009d\7\23\2\2\u009d"+
+    "\u00a0\t\5\2\2\u009e\u00a1\5\66\34\2\u009f\u00a1\5b\62\2\u00a0\u009e\3"+
+    "\2\2\2\u00a0\u009f\3\2\2\2\u00a1\u00da\3\2\2\2\u00a2\u00a5\t\6\2\2\u00a3"+
+    "\u00a6\5\66\34\2\u00a4\u00a6\5b\62\2\u00a5\u00a3\3\2\2\2\u00a5\u00a4\3"+
+    "\2\2\2\u00a6\u00da\3\2\2\2\u00a7\u00a8\7P\2\2\u00a8\u00aa\7(\2\2\u00a9"+
+    "\u00ab\5\66\34\2\u00aa\u00a9\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00da\3"+
+    "\2\2\2\u00ac\u00ad\7P\2\2\u00ad\u00da\7L\2\2\u00ae\u00af\7Q\2\2\u00af"+
+    "\u00b2\7S\2\2\u00b0\u00b1\7\21\2\2\u00b1\u00b3\5\66\34\2\u00b2\u00b0\3"+
+    "\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00b6\3\2\2\2\u00b4\u00b7\5\66\34\2\u00b5"+
+    "\u00b7\5b\62\2\u00b6\u00b4\3\2\2\2\u00b6\u00b5\3\2\2\2\u00b6\u00b7\3\2"+
+    "\2\2\u00b7\u00c1\3\2\2\2\u00b8\u00b9\7W\2\2\u00b9\u00be\5j\66\2\u00ba"+
+    "\u00bb\7\5\2\2\u00bb\u00bd\5j\66\2\u00bc\u00ba\3\2\2\2\u00bd\u00c0\3\2"+
+    "\2\2\u00be\u00bc\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf\u00c2\3\2\2\2\u00c0"+
+    "\u00be\3\2\2\2\u00c1\u00b8\3\2\2\2\u00c1\u00c2\3\2\2\2\u00c2\u00da\3\2"+
+    "\2\2\u00c3\u00c4\7Q\2\2\u00c4\u00c7\7\23\2\2\u00c5\u00c6\7\21\2\2\u00c6"+
+    "\u00c8\5j\66\2\u00c7\u00c5\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00cc\3\2"+
+    "\2\2\u00c9\u00ca\7R\2\2\u00ca\u00cd\5\66\34\2\u00cb\u00cd\5b\62\2\u00cc"+
+    "\u00c9\3\2\2\2\u00cc\u00cb\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00cf\3\2"+
+    "\2\2\u00ce\u00d0\5\66\34\2\u00cf\u00ce\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0"+
+    "\u00da\3\2\2\2\u00d1\u00d2\7Q\2\2\u00d2\u00d7\7X\2\2\u00d3\u00d5\t\7\2"+
+    "\2\u00d4\u00d3\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\u00d6\3\2\2\2\u00d6\u00d8"+
+    "\5h\65\2\u00d7\u00d4\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8\u00da\3\2\2\2\u00d9"+
+    "t\3\2\2\2\u00d9u\3\2\2\2\u00d9\u0086\3\2\2\2\u00d9\u0095\3\2\2\2\u00d9"+
+    "\u009b\3\2\2\2\u00d9\u00a2\3\2\2\2\u00d9\u00a7\3\2\2\2\u00d9\u00ac\3\2"+
+    "\2\2\u00d9\u00ae\3\2\2\2\u00d9\u00c3\3\2\2\2\u00d9\u00d1\3\2\2\2\u00da"+
+    "\7\3\2\2\2\u00db\u00dc\7\\\2\2\u00dc\u00e1\5\34\17\2\u00dd\u00de\7\5\2"+
+    "\2\u00de\u00e0\5\34\17\2\u00df\u00dd\3\2\2\2\u00e0\u00e3\3\2\2\2\u00e1"+
+    "\u00df\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2\u00e5\3\2\2\2\u00e3\u00e1\3\2"+
+    "\2\2\u00e4\u00db\3\2\2\2\u00e4\u00e5\3\2\2\2\u00e5\u00e6\3\2\2\2\u00e6"+
+    "\u00e7\5\n\6\2\u00e7\t\3\2\2\2\u00e8\u00f3\5\16\b\2\u00e9\u00ea\7D\2\2"+
+    "\u00ea\u00eb\7\17\2\2\u00eb\u00f0\5\20\t\2\u00ec\u00ed\7\5\2\2\u00ed\u00ef"+
+    "\5\20\t\2\u00ee\u00ec\3\2\2\2\u00ef\u00f2\3\2\2\2\u00f0\u00ee\3\2\2\2"+
+    "\u00f0\u00f1\3\2\2\2\u00f1\u00f4\3\2\2\2\u00f2\u00f0\3\2\2\2\u00f3\u00e9"+
+    "\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4\u00f6\3\2\2\2\u00f5\u00f7\5\f\7\2\u00f6"+
+    "\u00f5\3\2\2\2\u00f6\u00f7\3\2\2\2\u00f7\13\3\2\2\2\u00f8\u00f9\7\66\2"+
+    "\2\u00f9\u00fe\t\b\2\2\u00fa\u00fb\7a\2\2\u00fb\u00fc\t\b\2\2\u00fc\u00fe"+
+    "\7f\2\2\u00fd\u00f8\3\2\2\2\u00fd\u00fa\3\2\2\2\u00fe\r\3\2\2\2\u00ff"+
+    "\u0105\5\22\n\2\u0100\u0101\7\3\2\2\u0101\u0102\5\n\6\2\u0102\u0103\7"+
+    "\4\2\2\u0103\u0105\3\2\2\2\u0104\u00ff\3\2\2\2\u0104\u0100\3\2\2\2\u0105"+
+    "\17\3\2\2\2\u0106\u0108\5,\27\2\u0107\u0109\t\t\2\2\u0108\u0107\3\2\2"+
+    "\2\u0108\u0109\3\2\2\2\u0109\u010c\3\2\2\2\u010a\u010b\7@\2\2\u010b\u010d"+
+    "\t\n\2\2\u010c\u010a\3\2\2\2\u010c\u010d\3\2\2\2\u010d\21\3\2\2\2\u010e"+
+    "\u0110\7O\2\2\u010f\u0111\5\36\20\2\u0110\u010f\3\2\2\2\u0110\u0111\3"+
+    "\2\2\2\u0111\u0112\3\2\2\2\u0112\u0117\5 \21\2\u0113\u0114\7\5\2\2\u0114"+
+    "\u0116\5 \21\2\u0115\u0113\3\2\2\2\u0116\u0119\3\2\2\2\u0117\u0115\3\2"+
+    "\2\2\u0117\u0118\3\2\2\2\u0118\u011b\3\2\2\2\u0119\u0117\3\2\2\2\u011a"+
+    "\u011c\5\24\13\2\u011b\u011a\3\2\2\2\u011b\u011c\3\2\2\2\u011c\u011f\3"+
+    "\2\2\2\u011d\u011e\7[\2\2\u011e\u0120\5.\30\2\u011f\u011d\3\2\2\2\u011f"+
+    "\u0120\3\2\2\2\u0120\u0124\3\2\2\2\u0121\u0122\7*\2\2\u0122\u0123\7\17"+
+    "\2\2\u0123\u0125\5\26\f\2\u0124\u0121\3\2\2\2\u0124\u0125\3\2\2\2\u0125"+
+    "\u0128\3\2\2\2\u0126\u0127\7+\2\2\u0127\u0129\5.\30\2\u0128\u0126\3\2"+
+    "\2\2\u0128\u0129\3\2\2\2\u0129\23\3\2\2\2\u012a\u012b\7&\2\2\u012b\u0130"+
+    "\5\"\22\2\u012c\u012d\7\5\2\2\u012d\u012f\5\"\22\2\u012e\u012c\3\2\2\2"+
+    "\u012f\u0132\3\2\2\2\u0130\u012e\3\2\2\2\u0130\u0131\3\2\2\2\u0131\25"+
+    "\3\2\2\2\u0132\u0130\3\2\2\2\u0133\u0135\5\36\20\2\u0134\u0133\3\2\2\2"+
+    "\u0134\u0135\3\2\2\2\u0135\u0136\3\2\2\2\u0136\u013b\5\30\r\2\u0137\u0138"+
+    "\7\5\2\2\u0138\u013a\5\30\r\2\u0139\u0137\3\2\2\2\u013a\u013d\3\2\2\2"+
+    "\u013b\u0139\3\2\2\2\u013b\u013c\3\2\2\2\u013c\27\3\2\2\2\u013d\u013b"+
+    "\3\2\2\2\u013e\u013f\5\32\16\2\u013f\31\3\2\2\2\u0140\u0149\7\3\2\2\u0141"+
+    "\u0146\5,\27\2\u0142\u0143\7\5\2\2\u0143\u0145\5,\27\2\u0144\u0142\3\2"+
+    "\2\2\u0145\u0148\3\2\2\2\u0146\u0144\3\2\2\2\u0146\u0147\3\2\2\2\u0147"+
+    "\u014a\3\2\2\2\u0148\u0146\3\2\2\2\u0149\u0141\3\2\2\2\u0149\u014a\3\2"+
+    "\2\2\u014a\u014b\3\2\2\2\u014b\u014e\7\4\2\2\u014c\u014e\5,\27\2\u014d"+
+    "\u0140\3\2\2\2\u014d\u014c\3\2\2\2\u014e\33\3\2\2\2\u014f\u0150\5`\61"+
+    "\2\u0150\u0151\7\f\2\2\u0151\u0152\7\3\2\2\u0152\u0153\5\n\6\2\u0153\u0154"+
+    "\7\4\2\2\u0154\35\3\2\2\2\u0155\u0156\t\13\2\2\u0156\37\3\2\2\2\u0157"+
+    "\u015c\5,\27\2\u0158\u015a\7\f\2\2\u0159\u0158\3\2\2\2\u0159\u015a\3\2"+
+    "\2\2\u015a\u015b\3\2\2\2\u015b\u015d\5`\61\2\u015c\u0159\3\2\2\2\u015c"+
     "\u015d\3\2\2\2\u015d!\3\2\2\2\u015e\u0162\5*\26\2\u015f\u0161\5$\23\2"+
     "\u0160\u015f\3\2\2\2\u0161\u0164\3\2\2\2\u0162\u0160\3\2\2\2\u0162\u0163"+
     "\3\2\2\2\u0163#\3\2\2\2\u0164\u0162\3\2\2\2\u0165\u0166\5&\24\2\u0166"+
@@ -6723,77 +6706,70 @@ class SqlBaseParser extends Parser {
     "\u024b\3\2\2\2\u0250\u024c\3\2\2\2\u0251\u0257\3\2\2\2\u0252\u0253\f\13"+
     "\2\2\u0253\u0254\7s\2\2\u0254\u0256\5\\/\2\u0255\u0252\3\2\2\2\u0256\u0259"+
     "\3\2\2\2\u0257\u0255\3\2\2\2\u0257\u0258\3\2\2\2\u0258?\3\2\2\2\u0259"+
-    "\u0257\3\2\2\2\u025a\u0260\7\27\2\2\u025b\u025d\7\3\2\2\u025c\u025e\7"+
-    "x\2\2\u025d\u025c\3\2\2\2\u025d\u025e\3\2\2\2\u025e\u025f\3\2\2\2\u025f"+
-    "\u0261\7\4\2\2\u0260\u025b\3\2\2\2\u0260\u0261\3\2\2\2\u0261\u0270\3\2"+
-    "\2\2\u0262\u0265\7\25\2\2\u0263\u0264\7\3\2\2\u0264\u0266\7\4\2\2\u0265"+
-    "\u0263\3\2\2\2\u0265\u0266\3\2\2\2\u0266\u0270\3\2\2\2\u0267\u026d\7\26"+
-    "\2\2\u0268\u026a\7\3\2\2\u0269\u026b\7x\2\2\u026a\u0269\3\2\2\2\u026a"+
-    "\u026b\3\2\2\2\u026b\u026c\3\2\2\2\u026c\u026e\7\4\2\2\u026d\u0268\3\2"+
-    "\2\2\u026d\u026e\3\2\2\2\u026e\u0270\3\2\2\2\u026f\u025a\3\2\2\2\u026f"+
-    "\u0262\3\2\2\2\u026f\u0267\3\2\2\2\u0270A\3\2\2\2\u0271\u027c\5D#\2\u0272"+
-    "\u0273\7`\2\2\u0273\u0274\5D#\2\u0274\u0275\7f\2\2\u0275\u027c\3\2\2\2"+
-    "\u0276\u027c\5F$\2\u0277\u0278\7`\2\2\u0278\u0279\5F$\2\u0279\u027a\7"+
-    "f\2\2\u027a\u027c\3\2\2\2\u027b\u0271\3\2\2\2\u027b\u0272\3\2\2\2\u027b"+
-    "\u0276\3\2\2\2\u027b\u0277\3\2\2\2\u027cC\3\2\2\2\u027d\u027e\7\20\2\2"+
-    "\u027e\u027f\7\3\2\2\u027f\u0280\5,\27\2\u0280\u0281\7\f\2\2\u0281\u0282"+
-    "\5\\/\2\u0282\u0283\7\4\2\2\u0283E\3\2\2\2\u0284\u0285\7\24\2\2\u0285"+
-    "\u0286\7\3\2\2\u0286\u0287\5,\27\2\u0287\u0288\7\5\2\2\u0288\u0289\5\\"+
-    "/\2\u0289\u028a\7\4\2\2\u028aG\3\2\2\2\u028b\u0291\5J&\2\u028c\u028d\7"+
-    "`\2\2\u028d\u028e\5J&\2\u028e\u028f\7f\2\2\u028f\u0291\3\2\2\2\u0290\u028b"+
-    "\3\2\2\2\u0290\u028c\3\2\2\2\u0291I\3\2\2\2\u0292\u0293\7\"\2\2\u0293"+
-    "\u0294\7\3\2\2\u0294\u0295\5`\61\2\u0295\u0296\7&\2\2\u0296\u0297\5<\37"+
-    "\2\u0297\u0298\7\4\2\2\u0298K\3\2\2\2\u0299\u029f\5N(\2\u029a\u029b\7"+
-    "`\2\2\u029b\u029c\5N(\2\u029c\u029d\7f\2\2\u029d\u029f\3\2\2\2\u029e\u0299"+
-    "\3\2\2\2\u029e\u029a\3\2\2\2\u029fM\3\2\2\2\u02a0\u02a1\5P)\2\u02a1\u02ad"+
-    "\7\3\2\2\u02a2\u02a4\5\36\20\2\u02a3\u02a2\3\2\2\2\u02a3\u02a4\3\2\2\2"+
-    "\u02a4\u02a5\3\2\2\2\u02a5\u02aa\5,\27\2\u02a6\u02a7\7\5\2\2\u02a7\u02a9"+
-    "\5,\27\2\u02a8\u02a6\3\2\2\2\u02a9\u02ac\3\2\2\2\u02aa\u02a8\3\2\2\2\u02aa"+
-    "\u02ab\3\2\2\2\u02ab\u02ae\3\2\2\2\u02ac\u02aa\3\2\2\2\u02ad\u02a3\3\2"+
-    "\2\2\u02ad\u02ae\3\2\2\2\u02ae\u02af\3\2\2\2\u02af\u02b0\7\4\2\2\u02b0"+
-    "O\3\2\2\2\u02b1\u02b5\7\64\2\2\u02b2\u02b5\7I\2\2\u02b3\u02b5\5`\61\2"+
-    "\u02b4\u02b1\3\2\2\2\u02b4\u02b2\3\2\2\2\u02b4\u02b3\3\2\2\2\u02b5Q\3"+
-    "\2\2\2\u02b6\u02d1\7?\2\2\u02b7\u02d1\5X-\2\u02b8\u02d1\5h\65\2\u02b9"+
-    "\u02d1\5V,\2\u02ba\u02bc\7w\2\2\u02bb\u02ba\3\2\2\2\u02bc\u02bd\3\2\2"+
-    "\2\u02bd\u02bb\3\2\2\2\u02bd\u02be\3\2\2\2\u02be\u02d1\3\2\2\2\u02bf\u02d1"+
-    "\7v\2\2\u02c0\u02c1\7b\2\2\u02c1\u02c2\5j\66\2\u02c2\u02c3\7f\2\2\u02c3"+
-    "\u02d1\3\2\2\2\u02c4\u02c5\7c\2\2\u02c5\u02c6\5j\66\2\u02c6\u02c7\7f\2"+
-    "\2\u02c7\u02d1\3\2\2\2\u02c8\u02c9\7d\2\2\u02c9\u02ca\5j\66\2\u02ca\u02cb"+
-    "\7f\2\2\u02cb\u02d1\3\2\2\2\u02cc\u02cd\7e\2\2\u02cd\u02ce\5j\66\2\u02ce"+
-    "\u02cf\7f\2\2\u02cf\u02d1\3\2\2\2\u02d0\u02b6\3\2\2\2\u02d0\u02b7\3\2"+
-    "\2\2\u02d0\u02b8\3\2\2\2\u02d0\u02b9\3\2\2\2\u02d0\u02bb\3\2\2\2\u02d0"+
-    "\u02bf\3\2\2\2\u02d0\u02c0\3\2\2\2\u02d0\u02c4\3\2\2\2\u02d0\u02c8\3\2"+
-    "\2\2\u02d0\u02cc\3\2\2\2\u02d1S\3\2\2\2\u02d2\u02d3\t\r\2\2\u02d3U\3\2"+
-    "\2\2\u02d4\u02d5\t\16\2\2\u02d5W\3\2\2\2\u02d6\u02d8\7\60\2\2\u02d7\u02d9"+
-    "\t\7\2\2\u02d8\u02d7\3\2\2\2\u02d8\u02d9\3\2\2\2\u02d9\u02dc\3\2\2\2\u02da"+
-    "\u02dd\5h\65\2\u02db\u02dd\5j\66\2\u02dc\u02da\3\2\2\2\u02dc\u02db\3\2"+
-    "\2\2\u02dd\u02de\3\2\2\2\u02de\u02e1\5Z.\2\u02df\u02e0\7V\2\2\u02e0\u02e2"+
-    "\5Z.\2\u02e1\u02df\3\2\2\2\u02e1\u02e2\3\2\2\2\u02e2Y\3\2\2\2\u02e3\u02e4"+
-    "\t\17\2\2\u02e4[\3\2\2\2\u02e5\u02e6\5`\61\2\u02e6]\3\2\2\2\u02e7\u02e8"+
-    "\5`\61\2\u02e8\u02e9\7u\2\2\u02e9\u02eb\3\2\2\2\u02ea\u02e7\3\2\2\2\u02eb"+
-    "\u02ee\3\2\2\2\u02ec\u02ea\3\2\2\2\u02ec\u02ed\3\2\2\2\u02ed\u02ef\3\2"+
-    "\2\2\u02ee\u02ec\3\2\2\2\u02ef\u02f0\5`\61\2\u02f0_\3\2\2\2\u02f1\u02f4"+
-    "\5d\63\2\u02f2\u02f4\5f\64\2\u02f3\u02f1\3\2\2\2\u02f3\u02f2\3\2\2\2\u02f4"+
-    "a\3\2\2\2\u02f5\u02f6\5`\61\2\u02f6\u02f7\7\6\2\2\u02f7\u02f9\3\2\2\2"+
-    "\u02f8\u02f5\3\2\2\2\u02f8\u02f9\3\2\2\2\u02f9\u02fa\3\2\2\2\u02fa\u0302"+
-    "\7|\2\2\u02fb\u02fc\5`\61\2\u02fc\u02fd\7\6\2\2\u02fd\u02ff\3\2\2\2\u02fe"+
-    "\u02fb\3\2\2\2\u02fe\u02ff\3\2\2\2\u02ff\u0300\3\2\2\2\u0300\u0302\5`"+
-    "\61\2\u0301\u02f8\3\2\2\2\u0301\u02fe\3\2\2\2\u0302c\3\2\2\2\u0303\u0306"+
-    "\7}\2\2\u0304\u0306\7~\2\2\u0305\u0303\3\2\2\2\u0305\u0304\3\2\2\2\u0306"+
-    "e\3\2\2\2\u0307\u030b\7z\2\2\u0308\u030b\5l\67\2\u0309\u030b\7{\2\2\u030a"+
-    "\u0307\3\2\2\2\u030a\u0308\3\2\2\2\u030a\u0309\3\2\2\2\u030bg\3\2\2\2"+
-    "\u030c\u030f\7y\2\2\u030d\u030f\7x\2\2\u030e\u030c\3\2\2\2\u030e\u030d"+
-    "\3\2\2\2\u030fi\3\2\2\2\u0310\u0311\t\20\2\2\u0311k\3\2\2\2\u0312\u0313"+
-    "\t\21\2\2\u0313m\3\2\2\2m}\177\u0083\u008c\u008e\u0092\u0099\u00a0\u00a5"+
-    "\u00aa\u00b2\u00b6\u00be\u00c1\u00c7\u00cc\u00cf\u00d4\u00d7\u00d9\u00e1"+
-    "\u00e4\u00f0\u00f3\u00f6\u00fd\u0104\u0108\u010c\u0110\u0117\u011b\u011f"+
-    "\u0124\u0128\u0130\u0134\u013b\u0146\u0149\u014d\u0159\u015c\u0162\u0169"+
-    "\u0170\u0173\u0177\u017b\u017f\u0181\u018c\u0191\u0195\u0198\u019e\u01a1"+
-    "\u01a7\u01aa\u01ac\u01cf\u01d7\u01d9\u01e0\u01e5\u01e8\u01f0\u01f9\u01ff"+
-    "\u0207\u020c\u0212\u0215\u021c\u0224\u022a\u0236\u0238\u0243\u0250\u0257"+
-    "\u025d\u0260\u0265\u026a\u026d\u026f\u027b\u0290\u029e\u02a3\u02aa\u02ad"+
-    "\u02b4\u02bd\u02d0\u02d8\u02dc\u02e1\u02ec\u02f3\u02f8\u02fe\u0301\u0305"+
-    "\u030a\u030e";
+    "\u0257\3\2\2\2\u025a\u025e\7\27\2\2\u025b\u025e\7\25\2\2\u025c\u025e\7"+
+    "\26\2\2\u025d\u025a\3\2\2\2\u025d\u025b\3\2\2\2\u025d\u025c\3\2\2\2\u025e"+
+    "A\3\2\2\2\u025f\u026a\5D#\2\u0260\u0261\7`\2\2\u0261\u0262\5D#\2\u0262"+
+    "\u0263\7f\2\2\u0263\u026a\3\2\2\2\u0264\u026a\5F$\2\u0265\u0266\7`\2\2"+
+    "\u0266\u0267\5F$\2\u0267\u0268\7f\2\2\u0268\u026a\3\2\2\2\u0269\u025f"+
+    "\3\2\2\2\u0269\u0260\3\2\2\2\u0269\u0264\3\2\2\2\u0269\u0265\3\2\2\2\u026a"+
+    "C\3\2\2\2\u026b\u026c\7\20\2\2\u026c\u026d\7\3\2\2\u026d\u026e\5,\27\2"+
+    "\u026e\u026f\7\f\2\2\u026f\u0270\5\\/\2\u0270\u0271\7\4\2\2\u0271E\3\2"+
+    "\2\2\u0272\u0273\7\24\2\2\u0273\u0274\7\3\2\2\u0274\u0275\5,\27\2\u0275"+
+    "\u0276\7\5\2\2\u0276\u0277\5\\/\2\u0277\u0278\7\4\2\2\u0278G\3\2\2\2\u0279"+
+    "\u027f\5J&\2\u027a\u027b\7`\2\2\u027b\u027c\5J&\2\u027c\u027d\7f\2\2\u027d"+
+    "\u027f\3\2\2\2\u027e\u0279\3\2\2\2\u027e\u027a\3\2\2\2\u027fI\3\2\2\2"+
+    "\u0280\u0281\7\"\2\2\u0281\u0282\7\3\2\2\u0282\u0283\5`\61\2\u0283\u0284"+
+    "\7&\2\2\u0284\u0285\5<\37\2\u0285\u0286\7\4\2\2\u0286K\3\2\2\2\u0287\u028d"+
+    "\5N(\2\u0288\u0289\7`\2\2\u0289\u028a\5N(\2\u028a\u028b\7f\2\2\u028b\u028d"+
+    "\3\2\2\2\u028c\u0287\3\2\2\2\u028c\u0288\3\2\2\2\u028dM\3\2\2\2\u028e"+
+    "\u028f\5P)\2\u028f\u029b\7\3\2\2\u0290\u0292\5\36\20\2\u0291\u0290\3\2"+
+    "\2\2\u0291\u0292\3\2\2\2\u0292\u0293\3\2\2\2\u0293\u0298\5,\27\2\u0294"+
+    "\u0295\7\5\2\2\u0295\u0297\5,\27\2\u0296\u0294\3\2\2\2\u0297\u029a\3\2"+
+    "\2\2\u0298\u0296\3\2\2\2\u0298\u0299\3\2\2\2\u0299\u029c\3\2\2\2\u029a"+
+    "\u0298\3\2\2\2\u029b\u0291\3\2\2\2\u029b\u029c\3\2\2\2\u029c\u029d\3\2"+
+    "\2\2\u029d\u029e\7\4\2\2\u029eO\3\2\2\2\u029f\u02a3\7\64\2\2\u02a0\u02a3"+
+    "\7I\2\2\u02a1\u02a3\5`\61\2\u02a2\u029f\3\2\2\2\u02a2\u02a0\3\2\2\2\u02a2"+
+    "\u02a1\3\2\2\2\u02a3Q\3\2\2\2\u02a4\u02bf\7?\2\2\u02a5\u02bf\5X-\2\u02a6"+
+    "\u02bf\5h\65\2\u02a7\u02bf\5V,\2\u02a8\u02aa\7w\2\2\u02a9\u02a8\3\2\2"+
+    "\2\u02aa\u02ab\3\2\2\2\u02ab\u02a9\3\2\2\2\u02ab\u02ac\3\2\2\2\u02ac\u02bf"+
+    "\3\2\2\2\u02ad\u02bf\7v\2\2\u02ae\u02af\7b\2\2\u02af\u02b0\5j\66\2\u02b0"+
+    "\u02b1\7f\2\2\u02b1\u02bf\3\2\2\2\u02b2\u02b3\7c\2\2\u02b3\u02b4\5j\66"+
+    "\2\u02b4\u02b5\7f\2\2\u02b5\u02bf\3\2\2\2\u02b6\u02b7\7d\2\2\u02b7\u02b8"+
+    "\5j\66\2\u02b8\u02b9\7f\2\2\u02b9\u02bf\3\2\2\2\u02ba\u02bb\7e\2\2\u02bb"+
+    "\u02bc\5j\66\2\u02bc\u02bd\7f\2\2\u02bd\u02bf\3\2\2\2\u02be\u02a4\3\2"+
+    "\2\2\u02be\u02a5\3\2\2\2\u02be\u02a6\3\2\2\2\u02be\u02a7\3\2\2\2\u02be"+
+    "\u02a9\3\2\2\2\u02be\u02ad\3\2\2\2\u02be\u02ae\3\2\2\2\u02be\u02b2\3\2"+
+    "\2\2\u02be\u02b6\3\2\2\2\u02be\u02ba\3\2\2\2\u02bfS\3\2\2\2\u02c0\u02c1"+
+    "\t\r\2\2\u02c1U\3\2\2\2\u02c2\u02c3\t\16\2\2\u02c3W\3\2\2\2\u02c4\u02c6"+
+    "\7\60\2\2\u02c5\u02c7\t\7\2\2\u02c6\u02c5\3\2\2\2\u02c6\u02c7\3\2\2\2"+
+    "\u02c7\u02ca\3\2\2\2\u02c8\u02cb\5h\65\2\u02c9\u02cb\5j\66\2\u02ca\u02c8"+
+    "\3\2\2\2\u02ca\u02c9\3\2\2\2\u02cb\u02cc\3\2\2\2\u02cc\u02cf\5Z.\2\u02cd"+
+    "\u02ce\7V\2\2\u02ce\u02d0\5Z.\2\u02cf\u02cd\3\2\2\2\u02cf\u02d0\3\2\2"+
+    "\2\u02d0Y\3\2\2\2\u02d1\u02d2\t\17\2\2\u02d2[\3\2\2\2\u02d3\u02d4\5`\61"+
+    "\2\u02d4]\3\2\2\2\u02d5\u02d6\5`\61\2\u02d6\u02d7\7u\2\2\u02d7\u02d9\3"+
+    "\2\2\2\u02d8\u02d5\3\2\2\2\u02d9\u02dc\3\2\2\2\u02da\u02d8\3\2\2\2\u02da"+
+    "\u02db\3\2\2\2\u02db\u02dd\3\2\2\2\u02dc\u02da\3\2\2\2\u02dd\u02de\5`"+
+    "\61\2\u02de_\3\2\2\2\u02df\u02e2\5d\63\2\u02e0\u02e2\5f\64\2\u02e1\u02df"+
+    "\3\2\2\2\u02e1\u02e0\3\2\2\2\u02e2a\3\2\2\2\u02e3\u02e4\5`\61\2\u02e4"+
+    "\u02e5\7\6\2\2\u02e5\u02e7\3\2\2\2\u02e6\u02e3\3\2\2\2\u02e6\u02e7\3\2"+
+    "\2\2\u02e7\u02e8\3\2\2\2\u02e8\u02f0\7|\2\2\u02e9\u02ea\5`\61\2\u02ea"+
+    "\u02eb\7\6\2\2\u02eb\u02ed\3\2\2\2\u02ec\u02e9\3\2\2\2\u02ec\u02ed\3\2"+
+    "\2\2\u02ed\u02ee\3\2\2\2\u02ee\u02f0\5`\61\2\u02ef\u02e6\3\2\2\2\u02ef"+
+    "\u02ec\3\2\2\2\u02f0c\3\2\2\2\u02f1\u02f4\7}\2\2\u02f2\u02f4\7~\2\2\u02f3"+
+    "\u02f1\3\2\2\2\u02f3\u02f2\3\2\2\2\u02f4e\3\2\2\2\u02f5\u02f9\7z\2\2\u02f6"+
+    "\u02f9\5l\67\2\u02f7\u02f9\7{\2\2\u02f8\u02f5\3\2\2\2\u02f8\u02f6\3\2"+
+    "\2\2\u02f8\u02f7\3\2\2\2\u02f9g\3\2\2\2\u02fa\u02fd\7y\2\2\u02fb\u02fd"+
+    "\7x\2\2\u02fc\u02fa\3\2\2\2\u02fc\u02fb\3\2\2\2\u02fdi\3\2\2\2\u02fe\u02ff"+
+    "\t\20\2\2\u02ffk\3\2\2\2\u0300\u0301\t\21\2\2\u0301m\3\2\2\2h}\177\u0083"+
+    "\u008c\u008e\u0092\u0099\u00a0\u00a5\u00aa\u00b2\u00b6\u00be\u00c1\u00c7"+
+    "\u00cc\u00cf\u00d4\u00d7\u00d9\u00e1\u00e4\u00f0\u00f3\u00f6\u00fd\u0104"+
+    "\u0108\u010c\u0110\u0117\u011b\u011f\u0124\u0128\u0130\u0134\u013b\u0146"+
+    "\u0149\u014d\u0159\u015c\u0162\u0169\u0170\u0173\u0177\u017b\u017f\u0181"+
+    "\u018c\u0191\u0195\u0198\u019e\u01a1\u01a7\u01aa\u01ac\u01cf\u01d7\u01d9"+
+    "\u01e0\u01e5\u01e8\u01f0\u01f9\u01ff\u0207\u020c\u0212\u0215\u021c\u0224"+
+    "\u022a\u0236\u0238\u0243\u0250\u0257\u025d\u0269\u027e\u028c\u0291\u0298"+
+    "\u029b\u02a2\u02ab\u02be\u02c6\u02ca\u02cf\u02da\u02e1\u02e6\u02ec\u02ef"+
+    "\u02f3\u02f8\u02fc";
   public static final ATN _ATN =
     new ATNDeserializer().deserialize(_serializedATN.toCharArray());
   static {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java
index a6b0f12dafb..0f8afdd1552 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java
@@ -8,6 +8,9 @@ package org.elasticsearch.xpack.sql.util;
 
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.DateFormatters;
+import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.Foldables;
+import org.elasticsearch.xpack.sql.parser.ParsingException;
 import org.elasticsearch.xpack.sql.proto.StringUtils;
 
 import java.time.Instant;
@@ -37,6 +40,7 @@ public final class DateUtils {
         .toFormatter().withZone(UTC);
 
     private static final DateFormatter UTC_DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(UTC);
+    private static final int DEFAULT_PRECISION_FOR_CURRENT_FUNCTIONS = 3;
 
     private DateUtils() {}
 
@@ -123,4 +127,25 @@ public final class DateUtils {
         }
         return l - (l % DAY_IN_MILLIS);
     }
+
+    public static int getNanoPrecision(Expression precisionExpression, int nano) {
+        int precision = DEFAULT_PRECISION_FOR_CURRENT_FUNCTIONS;
+
+        if (precisionExpression != null) {
+            try {
+                precision = Foldables.intValueOf(precisionExpression);
+            } catch (Exception e) {
+                throw new ParsingException(precisionExpression.source(), "invalid precision; " + e.getMessage());
+            }
+        }
+
+        if (precision < 0 || precision > 9) {
+            throw new ParsingException(precisionExpression.source(), "precision needs to be between [0-9], received [{}]",
+                precisionExpression.sourceText());
+        }
+
+        // zero out the digits below the requested precision (e.g. precision 3 keeps milliseconds)
+        nano = nano - nano % (int) Math.pow(10, (9 - precision));
+        return nano;
+    }
 }
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java
index 405d4805410..166490699bb 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java
@@ -6,11 +6,21 @@
 
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
+import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.sql.TestUtils;
+import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
+import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier;
+import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
+import org.elasticsearch.xpack.sql.analysis.index.IndexResolution;
 import org.elasticsearch.xpack.sql.expression.Expression;
 import org.elasticsearch.xpack.sql.expression.Literal;
+import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;
+import org.elasticsearch.xpack.sql.parser.ParsingException;
+import org.elasticsearch.xpack.sql.parser.SqlParser;
 import org.elasticsearch.xpack.sql.session.Configuration;
+import org.elasticsearch.xpack.sql.stats.Metrics;
 import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase;
+import org.elasticsearch.xpack.sql.type.TypesTests;
 
 import java.time.ZoneId;
 import java.time.ZonedDateTime;
@@ -22,7 +32,7 @@ import static org.elasticsearch.xpack.sql.tree.Source.EMPTY;
 public class CurrentDateTimeTests extends AbstractNodeTestCase<CurrentDateTime, Expression> {
 
     public static CurrentDateTime randomCurrentDateTime() {
-        return new CurrentDateTime(EMPTY, Literal.of(EMPTY, randomInt(10)), TestUtils.randomConfiguration());
+        return new CurrentDateTime(EMPTY, Literal.of(EMPTY, randomInt(9)), TestUtils.randomConfiguration());
     }
 
     @Override
@@ -39,8 +49,8 @@ public class CurrentDateTimeTests extends AbstractNodeTestCase<CurrentDateTime,
     protected CurrentDateTime mutate(CurrentDateTime instance) {
         ZonedDateTime now = instance.configuration().now();
         ZoneId mutatedZoneId = randomValueOtherThanMany(o -> Objects.equals(now.getOffset(), o.getRules().getOffset(now.toInstant())),
-                () -> randomZone());
-        return new CurrentDateTime(instance.source(), Literal.of(EMPTY, randomInt(10)), TestUtils.randomConfiguration(mutatedZoneId));
+            ESTestCase::randomZone);
+        return new CurrentDateTime(instance.source(), Literal.of(EMPTY, randomInt(9)), TestUtils.randomConfiguration(mutatedZoneId));
     }
 
     @Override
@@ -75,4 +85,19 @@ public class CurrentDateTimeTests extends AbstractNodeTestCase<CurrentDateTime,
         ZonedDateTime zdt = ZonedDateTime.parse("2019-02-26T12:34:56.123456789Z");
         assertEquals(123_000_000, CurrentDateTime.nanoPrecision(zdt, null).getNano());
     }
+
+    public void testInvalidPrecision() {
+        SqlParser parser = new SqlParser();
+        IndexResolution indexResolution = IndexResolution.valid(new EsIndex("test",
+            TypesTests.loadMapping("mapping-multi-field-with-nested.json")));
+
+        Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), indexResolution, new Verifier(new Metrics()));
+        ParsingException e = expectThrows(ParsingException.class, () ->
+            analyzer.analyze(parser.createStatement("SELECT CURRENT_TIMESTAMP(100000000000000)"), true));
+        assertEquals("line 1:27: invalid precision; [100000000000000] out of [integer] range", e.getMessage());
+
+        e = expectThrows(ParsingException.class, () ->
+            analyzer.analyze(parser.createStatement("SELECT CURRENT_TIMESTAMP(100)"), true));
+        assertEquals("line 1:27: precision needs to be between [0-9], received [100]", e.getMessage());
+    }
 }
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTimeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTimeTests.java
index 639ac4b7b92..8603a3c6dd9 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTimeTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentTimeTests.java
@@ -6,11 +6,21 @@
 
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
+import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.sql.TestUtils;
+import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
+import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier;
+import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
+import org.elasticsearch.xpack.sql.analysis.index.IndexResolution;
 import org.elasticsearch.xpack.sql.expression.Expression;
 import org.elasticsearch.xpack.sql.expression.Literal;
+import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;
+import org.elasticsearch.xpack.sql.parser.ParsingException;
+import org.elasticsearch.xpack.sql.parser.SqlParser;
 import org.elasticsearch.xpack.sql.session.Configuration;
+import org.elasticsearch.xpack.sql.stats.Metrics;
 import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase;
+import org.elasticsearch.xpack.sql.type.TypesTests;
 
 import java.time.OffsetTime;
 import java.time.ZoneId;
@@ -23,7 +33,7 @@ import static org.elasticsearch.xpack.sql.tree.Source.EMPTY;
 public class CurrentTimeTests extends AbstractNodeTestCase<CurrentTime, Expression> {
 
     public static CurrentTime randomCurrentTime() {
-        return new CurrentTime(EMPTY, Literal.of(EMPTY, randomInt(10)), TestUtils.randomConfiguration());
+        return new CurrentTime(EMPTY, Literal.of(EMPTY, randomInt(9)), TestUtils.randomConfiguration());
     }
 
     @Override
@@ -40,8 +50,8 @@ public class CurrentTimeTests extends AbstractNodeTestCase<CurrentTime, Expressi
     protected CurrentTime mutate(CurrentTime instance) {
         ZonedDateTime now = instance.configuration().now();
         ZoneId mutatedZoneId = randomValueOtherThanMany(o -> Objects.equals(now.getOffset(), o.getRules().getOffset(now.toInstant())),
-                () -> randomZone());
-        return new CurrentTime(instance.source(), Literal.of(EMPTY, randomInt(10)), TestUtils.randomConfiguration(mutatedZoneId));
+            ESTestCase::randomZone);
+        return new CurrentTime(instance.source(), Literal.of(EMPTY, randomInt(9)), TestUtils.randomConfiguration(mutatedZoneId));
     }
 
     @Override
@@ -76,4 +86,19 @@ public class CurrentTimeTests extends AbstractNodeTestCase<CurrentTime, Expressi
         OffsetTime ot = OffsetTime.parse("12:34:56.123456789Z");
         assertEquals(123_000_000, CurrentTime.nanoPrecision(ot, null).getNano());
     }
+
+    public void testInvalidPrecision() {
+        SqlParser parser = new SqlParser();
+        IndexResolution indexResolution = IndexResolution.valid(new EsIndex("test",
+            TypesTests.loadMapping("mapping-multi-field-with-nested.json")));
+
+        Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), indexResolution, new Verifier(new Metrics()));
+        ParsingException e = expectThrows(ParsingException.class, () ->
+            analyzer.analyze(parser.createStatement("SELECT CURRENT_TIME(100000000000000)"), true));
+        assertEquals("line 1:22: invalid precision; [100000000000000] out of [integer] range", e.getMessage());
+
+        e = expectThrows(ParsingException.class, () ->
+            analyzer.analyze(parser.createStatement("SELECT CURRENT_TIME(100)"), true));
+        assertEquals("line 1:22: precision needs to be between [0-9], received [100]", e.getMessage());
+    }
 }
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java
index 6fd4611a434..c22c9c57029 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java
@@ -226,6 +226,29 @@ public class EscapedFunctionsTests extends ESTestCase {
         assertEquals("line 1:8: Invalid GUID, too short", ex.getMessage());
     }
 
+    public void testCurrentTimestampAsEscapedExpression() {
+        Expression expr = parser.createExpression("{fn CURRENT_TIMESTAMP(2)}");
+        assertEquals(UnresolvedFunction.class, expr.getClass());
+        UnresolvedFunction ur = (UnresolvedFunction) expr;
+        assertEquals("{fn CURRENT_TIMESTAMP(2)}", ur.sourceText());
+        assertEquals(1, ur.children().size());
+    }
+
+    public void testCurrentDateAsEscapedExpression() {
+        Expression expr = parser.createExpression("{fn CURRENT_DATE()}");
+        assertEquals(UnresolvedFunction.class, expr.getClass());
+        UnresolvedFunction ur = (UnresolvedFunction) expr;
+        assertEquals("{fn CURRENT_DATE()}", ur.sourceText());
+        assertEquals(0, ur.children().size());
+    }
+
+    public void testCurrentTimeAsEscapedExpression() {
+        Expression expr = parser.createExpression("{fn CURRENT_TIME(2)}");
+        assertEquals(UnresolvedFunction.class, expr.getClass());
+        UnresolvedFunction ur = (UnresolvedFunction) expr;
+        assertEquals("{fn CURRENT_TIME(2)}", ur.sourceText());
+        assertEquals(1, ur.children().size());
+    }
 
     public void testLimit() {
         Limit limit = limit(10);
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java
index 658c11a8ca5..98d7922d218 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java
@@ -398,12 +398,7 @@ public class ExpressionTests extends ESTestCase {
         assertEquals(1, ur.children().size());
         Expression child = ur.children().get(0);
         assertEquals(Literal.class, child.getClass());
-        assertEquals(Short.valueOf((short) 4), child.fold());
-    }
-
-    public void testCurrentTimestampInvalidPrecision() {
-        ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("CURRENT_TIMESTAMP(100)"));
-        assertEquals("line 1:20: Precision needs to be between [0-9], received [100]", ex.getMessage());
+        assertEquals(4, child.fold());
     }
 
     public void testCurrentDate() {
@@ -438,12 +433,7 @@ public class ExpressionTests extends ESTestCase {
         assertEquals(1, ur.children().size());
         Expression child = ur.children().get(0);
         assertEquals(Literal.class, child.getClass());
-        assertEquals(Short.valueOf((short) 7), child.fold());
-    }
-
-    public void testCurrentTimeInvalidPrecision() {
-        ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("CURRENT_TIME(100)"));
-        assertEquals("line 1:15: Precision needs to be between [0-9], received [100]", ex.getMessage());
+        assertEquals(7, child.fold());
     }
 
     public void testSourceKeyword() {

From bfa06d963e6161c337d2b40b41295f42379aeeeb Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Wed, 17 Apr 2019 08:34:07 +0100
Subject: [PATCH 055/112] Do not create missing directories in readonly repo
 (#41249)

Today we erroneously look for a node setting called `readonly` when deciding
whether or not to create a missing directory in a filesystem repository. This
change fixes the problem by reading the `readonly` repository setting instead.

Closes #41009
Relates #26909
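
A minimal sketch of the distinction, for illustration only: the class name and
the `/mnt/backups` location below are hypothetical, while `Settings.EMPTY`,
`Settings.builder()` and `getAsBoolean` are the same APIs the repository code
already uses.

    import org.elasticsearch.common.settings.Settings;

    public class ReadonlySettingSketch {
        public static void main(String[] args) {
            // Node settings (elasticsearch.yml) normally carry no "readonly" key,
            // so the old lookup always fell back to false and directories were created.
            Settings nodeSettings = Settings.EMPTY;

            // Repository settings come from the put-repository request; this is
            // where "readonly" is actually declared.
            Settings repositorySettings = Settings.builder()
                .put("location", "/mnt/backups")  // hypothetical path
                .put("readonly", true)
                .build();

            System.out.println(nodeSettings.getAsBoolean("readonly", false));       // false (old lookup)
            System.out.println(repositorySettings.getAsBoolean("readonly", false)); // true  (fixed lookup)
        }
    }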
---
 .../common/blobstore/fs/FsBlobStore.java      | 13 +++--
 .../repositories/fs/FsRepository.java         |  2 +-
 .../fs/FsBlobStoreContainerTests.java         |  2 +-
 .../common/blobstore/fs/FsBlobStoreTests.java |  8 ++-
 .../fs/FsBlobStoreRepositoryIT.java           | 53 +++++++++++++++++++
 .../snapshots/BlobStoreFormatIT.java          |  3 +-
 6 files changed, 68 insertions(+), 13 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
index eea30dd4e53..8a4d51e4dc9 100644
--- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
+++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java
@@ -40,10 +40,10 @@ public class FsBlobStore implements BlobStore {
 
     private final boolean readOnly;
 
-    public FsBlobStore(Settings settings, Path path) throws IOException {
+    public FsBlobStore(Settings settings, Path path, boolean readonly) throws IOException {
         this.path = path;
-        this.readOnly = settings.getAsBoolean("readonly", false);
-        if (!this.readOnly) {
+        this.readOnly = readonly;
+        if (this.readOnly == false) {
             Files.createDirectories(path);
         }
         this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size",
@@ -74,6 +74,11 @@ public class FsBlobStore implements BlobStore {
 
     @Override
     public void delete(BlobPath path) throws IOException {
+        assert readOnly == false : "should not delete anything from a readonly repository: " + path;
+        //noinspection ConstantConditions in case assertions are disabled
+        if (readOnly) {
+            throw new ElasticsearchException("unexpectedly deleting [" + path + "] from a readonly repository");
+        }
         IOUtils.rm(buildPath(path));
     }
 
@@ -84,7 +89,7 @@ public class FsBlobStore implements BlobStore {
 
     private synchronized Path buildAndCreate(BlobPath path) throws IOException {
         Path f = buildPath(path);
-        if (!readOnly) {
+        if (readOnly == false) {
             Files.createDirectories(f);
         }
         return f;
diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
index ea438f03bf1..a47ced0496d 100644
--- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
@@ -115,7 +115,7 @@ public class FsRepository extends BlobStoreRepository {
     protected BlobStore createBlobStore() throws Exception {
         final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
         final Path locationFile = environment.resolveRepoFile(location);
-        return new FsBlobStore(environment.settings(), locationFile);
+        return new FsBlobStore(environment.settings(), locationFile, isReadOnly());
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java
index 9230cded82b..7bd24aec8de 100644
--- a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java
+++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java
@@ -37,6 +37,6 @@ public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         } else {
             settings = Settings.EMPTY;
         }
-        return new FsBlobStore(settings, createTempDir());
+        return new FsBlobStore(settings, createTempDir(), false);
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java
index 59e4ffd7927..4a1b1e1016f 100644
--- a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java
+++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java
@@ -42,15 +42,14 @@ public class FsBlobStoreTests extends ESBlobStoreTestCase {
         } else {
             settings = Settings.EMPTY;
         }
-        return new FsBlobStore(settings, createTempDir());
+        return new FsBlobStore(settings, createTempDir(), false);
     }
 
     public void testReadOnly() throws Exception {
-        Settings settings = Settings.builder().put("readonly", true).build();
         Path tempDir = createTempDir();
         Path path = tempDir.resolve("bar");
 
-        try (FsBlobStore store = new FsBlobStore(settings, path)) {
+        try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, true)) {
             assertFalse(Files.exists(path));
             BlobPath blobPath = BlobPath.cleanPath().add("foo");
             store.blobContainer(blobPath);
@@ -61,8 +60,7 @@ public class FsBlobStoreTests extends ESBlobStoreTestCase {
             assertFalse(Files.exists(storePath));
         }
 
-        settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("readonly", false).build();
-        try (FsBlobStore store = new FsBlobStore(settings, path)) {
+        try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, false)) {
             assertTrue(Files.exists(path));
             BlobPath blobPath = BlobPath.cleanPath().add("foo");
             BlobContainer container = store.blobContainer(blobPath);
diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java
index 1ed42cb2474..dd4ca7bfd20 100644
--- a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java
+++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java
@@ -18,12 +18,22 @@
  */
 package org.elasticsearch.repositories.fs;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
 
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Stream;
+
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.instanceOf;
 
 public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase {
@@ -41,4 +51,47 @@ public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase
     protected void afterCreationCheck(Repository repository) {
         assertThat(repository, instanceOf(FsRepository.class));
     }
+
+    public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOException, ExecutionException, InterruptedException {
+        final String repoName = randomAsciiName();
+        final Path repoPath = randomRepoPath();
+
+        logger.info("--> creating repository {} at {}", repoName, repoPath);
+
+        assertAcked(client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder()
+            .put("location", repoPath)
+            .put("compress", randomBoolean())
+            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+        String indexName = randomAsciiName();
+        int docCount = iterations(10, 1000);
+        logger.info("-->  create random index {} with {} records", indexName, docCount);
+        addRandomDocuments(indexName, docCount);
+        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), docCount);
+
+        final String snapshotName = randomAsciiName();
+        logger.info("-->  create snapshot {}:{}", repoName, snapshotName);
+        assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
+            .setWaitForCompletion(true).setIndices(indexName));
+
+        assertAcked(client().admin().indices().prepareDelete(indexName));
+        assertAcked(client().admin().cluster().prepareDeleteRepository(repoName));
+
+        final Path deletedPath;
+        try (Stream<Path> contents = Files.list(repoPath.resolve("indices"))) {
+            //noinspection OptionalGetWithoutIsPresent because we know there's a subdirectory
+            deletedPath = contents.filter(Files::isDirectory).findAny().get();
+            IOUtils.rm(deletedPath);
+        }
+        assertFalse(Files.exists(deletedPath));
+
+        assertAcked(client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder()
+            .put("location", repoPath).put("readonly", true)));
+
+        final ElasticsearchException exception = expectThrows(ElasticsearchException.class, () ->
+            client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get());
+        assertThat(exception.getRootCause(), instanceOf(NoSuchFileException.class));
+
+        assertFalse("deleted path is not recreated in readonly repository", Files.exists(deletedPath));
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java
index 6f4f69ad67e..4febd0695c9 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java
@@ -238,8 +238,7 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
     }
 
     protected BlobStore createTestBlobStore() throws IOException {
-        Settings settings = Settings.builder().build();
-        return new FsBlobStore(settings, randomRepoPath());
+        return new FsBlobStore(Settings.EMPTY, randomRepoPath(), false);
     }
 
     protected void randomCorruption(BlobContainer blobContainer, String blobName) throws IOException {

From 711d2545aa80d434d384745067f6e300e12add7c Mon Sep 17 00:00:00 2001
From: David Kyle <david.kyle@elastic.co>
Date: Wed, 17 Apr 2019 09:01:15 +0100
Subject: [PATCH 056/112] [ML-DataFrame] Resolve random test failure using
 deterministic name (#41262)

---
 .../transforms/DataFrameTransformCheckpointTests.java         | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
index 6fe896872f3..786fafc2c07 100644
--- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
+++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java
@@ -91,8 +91,8 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFr
                 .matches(new DataFrameTransformCheckpoint(id, timestamp, checkpoint, checkpointsByIndex, (timeUpperBound / 2) + 1)));
     }
 
-    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/41076")
     public void testGetBehind() {
+        String baseIndexName = randomAlphaOfLength(8);
         String id = randomAlphaOfLengthBetween(1, 10);
         long timestamp = randomNonNegativeLong();
 
@@ -112,7 +112,7 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFr
                 checkpoints2.add(shardCheckpoint + 10);
             }
 
-            String indexName = randomAlphaOfLengthBetween(1, 10);
+            String indexName = baseIndexName + i;
 
             checkpointsByIndexOld.put(indexName, checkpoints1.stream().mapToLong(l -> l).toArray());
             checkpointsByIndexNew.put(indexName, checkpoints2.stream().mapToLong(l -> l).toArray());

From badb7a22e0b663693bab0e79d207d94781389ce7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?= <cbuescher@posteo.de>
Date: Wed, 17 Apr 2019 10:21:44 +0200
Subject: [PATCH 057/112] Some cleanups in NoisyChannelSpellChecker (#40949)

One of the two #getCorrections methods is only used in tests, so this change
moves it, together with the helper methods it requires, into that test. It also
reduces the visibility of several methods to package private, since the class is
not used outside its package.
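
With the convenience constructors removed, callers inside the package pass the
former defaults explicitly. This sketch mirrors the constructor call in the
updated NoisyChannelSpellCheckerTests hunk further down; the constants keep
their previous values (0.95 and 10):

    NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(
        NoisyChannelSpellChecker.REAL_WORD_LIKELIHOOD, // previously the no-arg default (0.95)
        true,                                          // requireUnigram
        NoisyChannelSpellChecker.DEFAULT_TOKEN_LIMIT); // previously the default token limit (10)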
---
 .../phrase/NoisyChannelSpellChecker.java      | 38 ++-------
 .../suggest/phrase/PhraseSuggester.java       | 12 ++-
 .../phrase/NoisyChannelSpellCheckerTests.java | 81 +++++++++++--------
 3 files changed, 63 insertions(+), 68 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java
index 7f225f1c3ea..9612d29f4f5 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java
@@ -18,48 +18,34 @@
  */
 package org.elasticsearch.search.suggest.phrase;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.synonym.SynonymFilter;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.codecs.TermStats;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.CharsRefBuilder;
 import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate;
 import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet;
 
-import java.io.CharArrayReader;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-//TODO public for tests
-public final class NoisyChannelSpellChecker {
+final class NoisyChannelSpellChecker {
     public static final double REAL_WORD_LIKELIHOOD = 0.95d;
     public static final int DEFAULT_TOKEN_LIMIT = 10;
     private final double realWordLikelihood;
     private final boolean requireUnigram;
     private final int tokenLimit;
 
-    public NoisyChannelSpellChecker() {
-        this(REAL_WORD_LIKELIHOOD);
-    }
-
-    public NoisyChannelSpellChecker(double nonErrorLikelihood) {
-        this(nonErrorLikelihood, true, DEFAULT_TOKEN_LIMIT);
-    }
-
-    public NoisyChannelSpellChecker(double nonErrorLikelihood, boolean requireUnigram, int tokenLimit) {
+    NoisyChannelSpellChecker(double nonErrorLikelihood, boolean requireUnigram, int tokenLimit) {
         this.realWordLikelihood = nonErrorLikelihood;
         this.requireUnigram = requireUnigram;
         this.tokenLimit = tokenLimit;
-
     }
 
-    public Result getCorrections(TokenStream stream, final CandidateGenerator generator,
+    Result getCorrections(TokenStream stream, final CandidateGenerator generator,
             float maxErrors, int numCorrections, WordScorer wordScorer, float confidence, int gramSize) throws IOException {
 
         final List<CandidateSet> candidateSetsList = new ArrayList<>();
@@ -131,26 +117,12 @@ public final class NoisyChannelSpellChecker {
         return new Result(bestCandidates, cutoffScore);
     }
 
-    public Result getCorrections(Analyzer analyzer, BytesRef query, CandidateGenerator generator,
-                                    float maxErrors, int numCorrections, IndexReader reader, String analysisField,
-                                    WordScorer scorer, float confidence, int gramSize) throws IOException {
-
-        return getCorrections(tokenStream(analyzer, query, new CharsRefBuilder(), analysisField), generator, maxErrors,
-            numCorrections, scorer, confidence, gramSize);
-
-    }
-
-    public TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) throws IOException {
-        spare.copyUTF8Bytes(query);
-        return analyzer.tokenStream(field, new CharArrayReader(spare.chars(), 0, spare.length()));
-    }
-
-    public static class Result {
+    static class Result {
         public static final Result EMPTY = new Result(Correction.EMPTY, Double.MIN_VALUE);
         public final Correction[] corrections;
         public final double cutoffScore;
 
-        public Result(Correction[] corrections, double cutoffScore) {
+        private Result(Correction[] corrections, double cutoffScore) {
             this.corrections = corrections;
             this.cutoffScore = cutoffScore;
         }
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
index 413afd155d4..d80fd68dacb 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.search.suggest.phrase;
 
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiTerms;
@@ -45,6 +46,7 @@ import org.elasticsearch.search.suggest.Suggester;
 import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext;
 import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result;
 
+import java.io.CharArrayReader;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -93,11 +95,12 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
             WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood,
                     separator);
             Result checkerResult;
-            try (TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) {
+            try (TokenStream stream = tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare,
+                    suggestion.getField())) {
                 checkerResult = checker.getCorrections(stream,
                         new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])),
                         suggestion.maxErrors(), suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize());
-                }
+            }
 
             PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
             response.addTerm(resultEntry);
@@ -144,6 +147,11 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
         return response;
     }
 
+    private static TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) throws IOException {
+        spare.copyUTF8Bytes(query);
+        return analyzer.tokenStream(field, new CharArrayReader(spare.chars(), 0, spare.length()));
+    }
+
     private static PhraseSuggestion.Entry buildResultEntry(SuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) {
         spare.copyUTF8Bytes(suggestion.getText());
         return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore);
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
index d819d880c86..94c5bf329eb 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
@@ -21,6 +21,7 @@ package org.elasticsearch.search.suggest.phrase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
@@ -34,6 +35,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiTerms;
@@ -42,14 +44,18 @@ import org.apache.lucene.search.spell.SuggestMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRefBuilder;
 import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result;
 import org.elasticsearch.test.ESTestCase;
 
+import java.io.CharArrayReader;
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.DEFAULT_TOKEN_LIMIT;
+import static org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.REAL_WORD_LIKELIHOOD;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 
@@ -113,12 +119,12 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         WordScorer wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d,
             new BytesRef(" "), 0.5f);
 
-        NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+        NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(REAL_WORD_LIKELIHOOD, true, DEFAULT_TOKEN_LIMIT);
         DirectSpellChecker spellchecker = new DirectSpellChecker();
         spellchecker.setMinQueryLength(1);
         DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR,
             ir, 0.95, 5);
-        Result result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1,
+        Result result = getCorrections(suggester, wrapper, new BytesRef("american ame"), generator, 1, 1,
             ir, "body", wordScorer, 1, 2);
         Correction[] corrections = result.corrections;
         assertThat(corrections.length, equalTo(1));
@@ -126,7 +132,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american <em>ace</em>"));
         assertThat(result.cutoffScore, greaterThan(0d));
 
-        result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1,
+        result = getCorrections(suggester, wrapper, new BytesRef("american ame"), generator, 1, 1,
             ir, "body", wordScorer, 0, 1);
         corrections = result.corrections;
         assertThat(corrections.length, equalTo(1));
@@ -134,10 +140,10 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american ame"));
         assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE));
 
-        suggester = new NoisyChannelSpellChecker(0.85);
+        suggester = new NoisyChannelSpellChecker(0.85, true, DEFAULT_TOKEN_LIMIT);
         wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d,
             new BytesRef(" "), 0.5f);
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4,
             ir, "body", wordScorer, 0, 2).corrections;
         assertThat(corrections.length, equalTo(4));
         assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
@@ -149,7 +155,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn</em> the <em>god</em> jewel"));
         assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the got jewel"));
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f,
             4, ir, "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(4));
         assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
@@ -158,10 +164,10 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
 
         // Test some of the highlighting corner cases
-        suggester = new NoisyChannelSpellChecker(0.85);
+        suggester = new NoisyChannelSpellChecker(0.85, true, DEFAULT_TOKEN_LIMIT);
         wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d,
             new BytesRef(" "), 0.5f);
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4,
             ir, "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(4));
         assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
@@ -195,17 +201,17 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         spellchecker.setAccuracy(0.0f);
         spellchecker.setMinPrefix(1);
         spellchecker.setMinQueryLength(1);
-        suggester = new NoisyChannelSpellChecker(0.85);
+        suggester = new NoisyChannelSpellChecker(0.85, true, DEFAULT_TOKEN_LIMIT);
         wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d,
             new BytesRef(" "), 0.5f);
-        corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4,
+        corrections = getCorrections(suggester, analyzer, new BytesRef("captian usa"), generator, 2, 4,
             ir, "body", wordScorer, 1, 2).corrections;
         assertThat(corrections[0].join(space).utf8ToString(), equalTo("captain america"));
         assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
 
         generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85,
             10, null, analyzer, MultiTerms.getTerms(ir, "body"));
-        corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4,
+        corrections = getCorrections(suggester, analyzer, new BytesRef("captian usw"), generator, 2, 4,
             ir, "body", wordScorer, 1, 2).corrections;
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
         assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
@@ -213,7 +219,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         // Make sure that user supplied text is not marked as highlighted in the presence of a synonym filter
         generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85,
             10, null, analyzer, MultiTerms.getTerms(ir, "body"));
-        corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir,
+        corrections = getCorrections(suggester, analyzer, new BytesRef("captain usw"), generator, 2, 4, ir,
             "body", wordScorer, 1, 2).corrections;
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
         assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain <em>america</em>"));
@@ -282,7 +288,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         DirectoryReader ir = DirectoryReader.open(writer);
         LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d,
             new BytesRef(" "), 0.5f);
-        NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+        NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(REAL_WORD_LIKELIHOOD, true, DEFAULT_TOKEN_LIMIT);
         DirectSpellChecker spellchecker = new DirectSpellChecker();
         spellchecker.setMinQueryLength(1);
         DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir,
@@ -291,27 +297,27 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
             0.95, 10, wrapper, wrapper,  MultiTerms.getTerms(ir, "body_reverse"));
         CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse);
 
-        Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1,
+        Correction[] corrections = getCorrections(suggester, wrapper, new BytesRef("american cae"), generator, 1, 1,
             ir, "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
 
         generator = new MultiCandidateGeneratorWrapper(5, forward, reverse);
-        corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("american ame"), generator, 1, 1, ir,
             "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), forward, 1, 1, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("american cae"), forward, 1, 1, ir,
             "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(0)); // only use forward with constant prefix
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("america cae"), generator, 2, 1, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("america cae"), generator, 2, 1, ir,
             "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 4, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 4, ir,
             "body", wordScorer, 0, 2).corrections;
         assertThat(corrections.length, equalTo(4));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
@@ -319,18 +325,18 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("four the god jewel"));
 
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 1, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 1, ir,
             "body", wordScorer, 1.5f, 2).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir,
             "body", wordScorer, 1.5f, 2).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
 
         // Test a special case where one of the suggest term is unchanged by the postFilter, 'II' here is unchanged by the reverse analyzer.
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Quazar II"), generator, 1, 1, ir,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Quazar II"), generator, 1, 1, ir,
             "body", wordScorer, 1, 2).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("quasar ii"));
@@ -391,24 +397,24 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         WordScorer wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d,
             new BytesRef(" "), 0.5, 0.4, 0.1);
 
-        NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+        NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(REAL_WORD_LIKELIHOOD, true, DEFAULT_TOKEN_LIMIT);
         DirectSpellChecker spellchecker = new DirectSpellChecker();
         spellchecker.setMinQueryLength(1);
         DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir,
             0.95, 5);
-        Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1,
+        Correction[] corrections = getCorrections(suggester, wrapper, new BytesRef("american ame"), generator, 1, 1,
             ir, "body", wordScorer, 1, 3).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("american ame"), generator, 1, 1,
             ir, "body", wordScorer, 1, 1).corrections;
         assertThat(corrections.length, equalTo(0));
 //        assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape"));
 
         wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d,
             new BytesRef(" "), 0.5, 0.4, 0.1);
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4,
             ir, "body", wordScorer, 0, 3).corrections;
         assertThat(corrections.length, equalTo(4));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
@@ -419,7 +425,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
 
 
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4,
             ir, "body", wordScorer, 1, 3).corrections;
         assertThat(corrections.length, equalTo(4));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
@@ -428,7 +434,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
 
 
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1,
             ir, "body", wordScorer, 100, 3).corrections;
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
@@ -456,23 +462,23 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         spellchecker.setAccuracy(0.0f);
         spellchecker.setMinPrefix(1);
         spellchecker.setMinQueryLength(1);
-        suggester = new NoisyChannelSpellChecker(0.95);
+        suggester = new NoisyChannelSpellChecker(0.95, true, DEFAULT_TOKEN_LIMIT);
         wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d,
             new BytesRef(" "),  0.5, 0.4, 0.1);
-        corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4,
+        corrections = getCorrections(suggester, analyzer, new BytesRef("captian usa"), generator, 2, 4,
             ir, "body", wordScorer, 1, 3).corrections;
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
 
         generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95,
             10, null, analyzer, MultiTerms.getTerms(ir, "body"));
-        corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4,
+        corrections = getCorrections(suggester, analyzer, new BytesRef("captian usw"), generator, 2, 4,
             ir, "body", wordScorer, 1, 3).corrections;
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
 
 
         wordScorer = new StupidBackoffScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d,
             new BytesRef(" "), 0.4);
-        corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2,
+        corrections = getCorrections(suggester, wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2,
             ir, "body", wordScorer, 0, 3).corrections;
         assertThat(corrections.length, equalTo(2));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
@@ -494,11 +500,11 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
             try (DirectoryReader ir = DirectoryReader.open(dir)) {
                 WordScorer wordScorer = new StupidBackoffScorer(ir, MultiTerms.getTerms(ir, "field"), "field",  0.95d,
                     new BytesRef(" "), 0.4f);
-                NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+                NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(REAL_WORD_LIKELIHOOD, true, DEFAULT_TOKEN_LIMIT);
                 DirectSpellChecker spellchecker = new DirectSpellChecker();
                 DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "field",
                     SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
-                Result result = suggester.getCorrections(new StandardAnalyzer(), new BytesRef("valeu"), generator, 1, 1,
+                Result result = getCorrections(suggester, new StandardAnalyzer(), new BytesRef("valeu"), generator, 1, 1,
                     ir, "field", wordScorer, 1, 2);
                 assertThat(result.corrections.length, equalTo(1));
                 assertThat(result.corrections[0].join(space).utf8ToString(), equalTo("value"));
@@ -506,4 +512,13 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         }
     }
 
+    private Result getCorrections(NoisyChannelSpellChecker checker, Analyzer analyzer, BytesRef query, CandidateGenerator generator,
+            float maxErrors, int numCorrections, IndexReader reader, String analysisField, WordScorer scorer, float confidence,
+            int gramSize) throws IOException {
+        CharsRefBuilder spare = new CharsRefBuilder();
+        spare.copyUTF8Bytes(query);
+        TokenStream tokenStream = analyzer.tokenStream(analysisField, new CharArrayReader(spare.chars(), 0, spare.length()));
+        return checker.getCorrections(tokenStream, generator, maxErrors, numCorrections, scorer, confidence, gramSize);
+    }
+
 }

From 6566979c189dc111ea0128469771aafb3c3c556e Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 17 Apr 2019 06:59:29 -0400
Subject: [PATCH 058/112] Always check for archiving broken index settings
 (#41209)

Today we only check whether an index has broken settings while checking
whether the index needs to be upgraded. However, an index setting can
become broken even after the index has been upgraded to the current
version, for example if the user removed a plugin (or downgraded from
the default distribution to the non-default distribution) while staying
on the same version of Elasticsearch. In that case some registered
settings go missing and the index is now broken. Because we skip the
check for already-upgraded indices, the broken settings are not
archived and the index instead becomes unassigned due to the missing
settings. This commit addresses that by checking for broken
settings whether or not the index is already upgraded.
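
Sketch of the new behaviour, reusing the helpers introduced in the test
changes below (the index name and the invalid refresh_interval value are
only illustrative):

    MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService();
    // already upgraded, but carries a setting value that no longer validates
    IndexMetaData broken = newIndexMeta("foo", Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT)
        .put("index.refresh_interval", "-200")
        .build());
    IndexMetaData fixed =
        service.upgradeIndexMetaData(broken, Version.CURRENT.minimumIndexCompatibilityVersion());
    // the broken setting is archived instead of leaving the index unassignable
    assert "-200".equals(fixed.getSettings().get("archived.index.refresh_interval"));
    assert fixed.getSettings().get("index.refresh_interval") == null;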
---
 .../metadata/MetaDataIndexUpgradeService.java |  6 +-
 .../admin/indices/create/CreateIndexIT.java   | 69 -------------------
 .../MetaDataIndexUpgradeServiceTests.java     | 40 +++++++----
 3 files changed, 33 insertions(+), 82 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
index 6bc9104000f..d3520da6702 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
@@ -88,7 +88,11 @@ public class MetaDataIndexUpgradeService {
     public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
         // Throws an exception if there are too-old segments:
         if (isUpgraded(indexMetaData)) {
-            return indexMetaData;
+            /*
+             * We still need to check for broken index settings since it might be that a user removed a plugin that registers a setting
+             * needed by this index.
+             */
+            return archiveBrokenIndexSettings(indexMetaData);
         }
         checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion);
         IndexMetaData newMetaData = indexMetaData;
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index f23dbaa8ea4..27e3ffefd63 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.action.admin.indices.create;
 
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
@@ -33,28 +31,18 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.IndexRoutingTable;
-import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
-import org.elasticsearch.cluster.routing.RoutingTable;
-import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.gateway.MetaStateService;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.query.RangeQueryBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
-import org.elasticsearch.test.InternalTestCluster;
 
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiFunction;
 
@@ -63,12 +51,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
 import static org.hamcrest.Matchers.allOf;
-import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-import static org.hamcrest.Matchers.hasToString;
-import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.core.IsNull.notNullValue;
 
@@ -396,57 +380,4 @@ public class CreateIndexIT extends ESIntegTestCase {
         assertEquals("Should have index name in response", "foo", response.index());
     }
 
-    public void testIndexWithUnknownSetting() throws Exception {
-        final int replicas = internalCluster().numDataNodes() - 1;
-        final Settings settings = Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", replicas).build();
-        client().admin().indices().prepareCreate("test").setSettings(settings).get();
-        ensureGreen("test");
-        final ClusterState state = client().admin().cluster().prepareState().get().getState();
-
-        final Set<String> dataOrMasterNodeNames = new HashSet<>();
-        for (final ObjectCursor<DiscoveryNode> node : state.nodes().getMasterAndDataNodes().values()) {
-            assertTrue(dataOrMasterNodeNames.add(node.value.getName()));
-        }
-
-        final IndexMetaData metaData = state.getMetaData().index("test");
-        internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
-            @Override
-            public Settings onNodeStopped(String nodeName) throws Exception {
-                if (dataOrMasterNodeNames.contains(nodeName)) {
-                    final MetaStateService metaStateService = internalCluster().getInstance(MetaStateService.class, nodeName);
-                    final IndexMetaData brokenMetaData =
-                            IndexMetaData
-                                    .builder(metaData)
-                                    .settings(Settings.builder().put(metaData.getSettings()).put("index.foo", true))
-                                    .build();
-                    // so evil
-                    metaStateService.writeIndexAndUpdateManifest("broken metadata", brokenMetaData);
-                }
-                return super.onNodeStopped(nodeName);
-            }
-        });
-
-        // check that the cluster does not keep reallocating shards
-        assertBusy(() -> {
-            final RoutingTable routingTable = client().admin().cluster().prepareState().get().getState().routingTable();
-            final IndexRoutingTable indexRoutingTable = routingTable.index("test");
-            assertNotNull(indexRoutingTable);
-            for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
-                assertTrue(shardRoutingTable.primaryShard().unassigned());
-                assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_NO,
-                    shardRoutingTable.primaryShard().unassignedInfo().getLastAllocationStatus());
-                assertThat(shardRoutingTable.primaryShard().unassignedInfo().getNumFailedAllocations(), greaterThan(0));
-            }
-        }, 60, TimeUnit.SECONDS);
-        client().admin().indices().prepareClose("test").get();
-
-        // try to open the index
-        final ElasticsearchException e =
-                expectThrows(ElasticsearchException.class, () -> client().admin().indices().prepareOpen("test").get());
-        assertThat(e, hasToString(containsString("Failed to verify index " + metaData.getIndex())));
-        assertNotNull(e.getCause());
-        assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
-        assertThat(e, hasToString(containsString("unknown setting [index.foo]")));
-    }
-
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java
index c1e341fd5bc..50166bd42b3 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java
@@ -28,12 +28,12 @@ import org.elasticsearch.test.VersionUtils;
 
 import java.util.Collections;
 
+import static org.hamcrest.Matchers.equalTo;
+
 public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
 
     public void testArchiveBrokenIndexSettings() {
-        MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
-            new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER),
-                IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList());
+        MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService();
         IndexMetaData src = newIndexMeta("foo", Settings.EMPTY);
         IndexMetaData indexMetaData = service.archiveBrokenIndexSettings(src);
         assertSame(indexMetaData, src);
@@ -58,10 +58,20 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
         assertSame(indexMetaData, src);
     }
 
+    public void testAlreadyUpgradedIndexArchivesBrokenIndexSettings() {
+        final MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService();
+        final IndexMetaData initial = newIndexMeta(
+            "foo",
+            Settings.builder().put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).put("index.refresh_interval", "-200").build());
+        assertTrue(service.isUpgraded(initial));
+        final IndexMetaData after = service.upgradeIndexMetaData(initial, Version.CURRENT.minimumIndexCompatibilityVersion());
+        // the index does not need to be upgraded, but checking whether it does should still archive any broken settings
+        assertThat(after.getSettings().get("archived.index.refresh_interval"), equalTo("-200"));
+        assertNull(after.getSettings().get("index.refresh_interval"));
+    }
+
     public void testUpgrade() {
-        MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
-            new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER),
-                IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList());
+        MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService();
         IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
         assertFalse(service.isUpgraded(src));
         src = service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion());
@@ -72,9 +82,7 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
     }
 
     public void testIsUpgraded() {
-        MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
-            new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER),
-                IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList());
+        MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService();
         IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
         assertFalse(service.isUpgraded(src));
         Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion());
@@ -85,9 +93,7 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
     }
 
     public void testFailUpgrade() {
-        MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
-            new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER),
-                IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList());
+        MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService();
         Version minCompat = Version.CURRENT.minimumIndexCompatibilityVersion();
         Version indexUpgraded = VersionUtils.randomVersionBetween(random(), minCompat, VersionUtils.getPreviousVersion(Version.CURRENT));
         Version indexCreated = Version.fromString((minCompat.major - 1) + "." + randomInt(5) + "." + randomInt(5));
@@ -141,6 +147,15 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
         assertEquals(message, "Cannot upgrade index foo");
     }
 
+    private MetaDataIndexUpgradeService getMetaDataIndexUpgradeService() {
+        return new MetaDataIndexUpgradeService(
+            Settings.EMPTY,
+            xContentRegistry(),
+            new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER),
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
+            Collections.emptyList());
+    }
+
     public static IndexMetaData newIndexMeta(String name, Settings indexSettings) {
         Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
@@ -152,4 +167,5 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
             .build();
         return IndexMetaData.builder(name).settings(build).build();
     }
+
 }

From 1d2365f5b6dc774487f3622052ec32a3fa2f5962 Mon Sep 17 00:00:00 2001
From: David Kyle <david.kyle@elastic.co>
Date: Wed, 17 Apr 2019 14:56:30 +0100
Subject: [PATCH 059/112] [ML-DataFrame] Refactorings and tidying (#41248)

Remove unnecessary generic params from SingleGroupSource
and unused code from the HLRC
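
For illustration, the Type enum in the HLRC SingleGroupSource is reduced
to a plain enum (mirroring the hunk below); the numeric id and the
fromId(byte) lookup were only used by the removed code:

    public enum Type {
        TERMS,
        HISTOGRAM,
        DATE_HISTOGRAM;

        public String value() {
            return name().toLowerCase(Locale.ROOT);
        }
    }

Server-side call sites now use the raw type, e.g.
Map<String, SingleGroupSource> instead of Map<String, SingleGroupSource<?>>.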
---
 .../transforms/pivot/SingleGroupSource.java   | 29 ++-----------------
 .../transforms/DataFrameTransformConfig.java  |  2 +-
 .../pivot/DateHistogramGroupSource.java       |  4 +--
 .../transforms/pivot/GroupConfig.java         | 14 ++++-----
 .../pivot/HistogramGroupSource.java           |  4 +--
 .../transforms/pivot/PivotConfig.java         |  2 +-
 .../transforms/pivot/SingleGroupSource.java   |  8 ++---
 .../transforms/pivot/TermsGroupSource.java    |  4 +--
 .../transforms/pivot/GroupConfigTests.java    |  6 ++--
 ...TransportGetDataFrameTransformsAction.java |  4 +--
 ...nsportPreviewDataFrameTransformAction.java |  3 +-
 .../checkpoint/CheckpointException.java       | 13 ++-------
 .../transforms/DataFrameTransformTask.java    |  4 +--
 .../transforms/pivot/SchemaUtil.java          | 14 +++++----
 14 files changed, 39 insertions(+), 72 deletions(-)

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java
index 8168d8850e7..b1234277d8c 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java
@@ -30,32 +30,9 @@ public abstract class SingleGroupSource implements ToXContentObject {
     protected static final ParseField FIELD = new ParseField("field");
 
     public enum Type {
-        TERMS(0),
-        HISTOGRAM(1),
-        DATE_HISTOGRAM(2);
-
-        private final byte id;
-
-        Type(int id) {
-            this.id = (byte) id;
-        }
-
-        public byte getId() {
-            return id;
-        }
-
-        public static Type fromId(byte id) {
-            switch (id) {
-                case 0:
-                    return TERMS;
-                case 1:
-                    return HISTOGRAM;
-                case 2:
-                    return DATE_HISTOGRAM;
-                default:
-                    throw new IllegalArgumentException("unknown type");
-            }
-        }
+        TERMS,
+        HISTOGRAM,
+        DATE_HISTOGRAM;
 
         public String value() {
             return name().toLowerCase(Locale.ROOT);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java
index a8e14faf2f0..89eab5605ca 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java
@@ -215,7 +215,7 @@ public class DataFrameTransformConfig extends AbstractDiffable<DataFrameTransfor
     }
 
     public static DataFrameTransformConfig fromXContent(final XContentParser parser, @Nullable final String optionalTransformId,
-            boolean lenient) throws IOException {
+            boolean lenient) {
 
         return lenient ? LENIENT_PARSER.apply(parser, optionalTransformId) : STRICT_PARSER.apply(parser, optionalTransformId);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java
index a60a7ef98e9..f4bf094235a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java
@@ -20,7 +20,7 @@ import java.time.ZoneOffset;
 import java.util.Objects;
 
 
-public class DateHistogramGroupSource extends SingleGroupSource<DateHistogramGroupSource> {
+public class DateHistogramGroupSource extends SingleGroupSource {
 
     private static final String NAME = "data_frame_date_histogram_group";
     private static final ParseField TIME_ZONE = new ParseField("time_zone");
@@ -51,7 +51,7 @@ public class DateHistogramGroupSource extends SingleGroupSource<DateHistogramGro
             return new DateHistogramGroupSource(field);
         });
 
-        declareValuesSourceFields(parser, null);
+        declareValuesSourceFields(parser);
 
         parser.declareField((histogram, interval) -> {
             if (interval instanceof Long) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java
index de394fa3f19..e5ba14c381a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java
@@ -40,9 +40,9 @@ public class GroupConfig implements Writeable, ToXContentObject {
     private static final Logger logger = LogManager.getLogger(GroupConfig.class);
 
     private final Map<String, Object> source;
-    private final Map<String, SingleGroupSource<?>> groups;
+    private final Map<String, SingleGroupSource> groups;
 
-    public GroupConfig(final Map<String, Object> source, final Map<String, SingleGroupSource<?>> groups) {
+    public GroupConfig(final Map<String, Object> source, final Map<String, SingleGroupSource> groups) {
         this.source = ExceptionsHelper.requireNonNull(source, DataFrameField.GROUP_BY.getPreferredName());
         this.groups = groups;
     }
@@ -64,7 +64,7 @@ public class GroupConfig implements Writeable, ToXContentObject {
         });
     }
 
-    public Map <String, SingleGroupSource<?>> getGroups() {
+    public Map <String, SingleGroupSource> getGroups() {
         return groups;
     }
 
@@ -109,7 +109,7 @@ public class GroupConfig implements Writeable, ToXContentObject {
     public static GroupConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException {
         NamedXContentRegistry registry = parser.getXContentRegistry();
         Map<String, Object> source = parser.mapOrdered();
-        Map<String, SingleGroupSource<?>> groups = null;
+        Map<String, SingleGroupSource> groups = null;
 
         if (source.isEmpty()) {
             if (lenient) {
@@ -133,9 +133,9 @@ public class GroupConfig implements Writeable, ToXContentObject {
         return new GroupConfig(source, groups);
     }
 
-    private static Map<String, SingleGroupSource<?>> parseGroupConfig(final XContentParser parser,
+    private static Map<String, SingleGroupSource> parseGroupConfig(final XContentParser parser,
             boolean lenient) throws IOException {
-        LinkedHashMap<String, SingleGroupSource<?>> groups = new LinkedHashMap<>();
+        LinkedHashMap<String, SingleGroupSource> groups = new LinkedHashMap<>();
 
         // be parsing friendly, whether the token needs to be advanced or not (similar to what ObjectParser does)
         XContentParser.Token token;
@@ -158,7 +158,7 @@ public class GroupConfig implements Writeable, ToXContentObject {
 
             token = parser.nextToken();
             ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
-            SingleGroupSource<?> groupSource;
+            SingleGroupSource groupSource;
             switch (groupType) {
             case TERMS:
                 groupSource = TermsGroupSource.fromXContent(parser, lenient);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java
index 95e1068e0f4..737590a0cc1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java
@@ -17,7 +17,7 @@ import java.util.Objects;
 
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
 
-public class HistogramGroupSource extends SingleGroupSource<HistogramGroupSource> {
+public class HistogramGroupSource extends SingleGroupSource {
 
     static final ParseField INTERVAL = new ParseField("interval");
     private static final String NAME = "data_frame_histogram_group";
@@ -44,7 +44,7 @@ public class HistogramGroupSource extends SingleGroupSource<HistogramGroupSource
             double interval = (double) args[1];
             return new HistogramGroupSource(field, interval);
         });
-        declareValuesSourceFields(parser, null);
+        declareValuesSourceFields(parser);
         parser.declareDouble(optionalConstructorArg(), INTERVAL);
         return parser;
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java
index 4eef08bd9c0..993ba78482a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java
@@ -91,7 +91,7 @@ public class PivotConfig implements Writeable, ToXContentObject {
         builder.field(CompositeAggregationBuilder.SOURCES_FIELD_NAME.getPreferredName());
         builder.startArray();
 
-        for (Entry<String, SingleGroupSource<?>> groupBy : groups.getGroups().entrySet()) {
+        for (Entry<String, SingleGroupSource> groupBy : groups.getGroups().entrySet()) {
             builder.startObject();
             builder.startObject(groupBy.getKey());
             builder.field(groupBy.getValue().getType().value(), groupBy.getValue());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java
index 8ba5caddb6f..0cdef0e4c3a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java
@@ -13,7 +13,6 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.AbstractObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.search.aggregations.support.ValueType;
 
 import java.io.IOException;
 import java.util.Locale;
@@ -24,7 +23,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
 /*
  * Base class for a single source for group_by
  */
-public abstract class SingleGroupSource<AB extends SingleGroupSource<AB>> implements Writeable, ToXContentObject {
+public abstract class SingleGroupSource implements Writeable, ToXContentObject {
 
     public enum Type {
         TERMS(0),
@@ -64,8 +63,7 @@ public abstract class SingleGroupSource<AB extends SingleGroupSource<AB>> implem
     // TODO: add script
     protected final String field;
 
-    static <VB extends SingleGroupSource<?>, T> void declareValuesSourceFields(AbstractObjectParser<VB, T> parser,
-            ValueType targetValueType) {
+    static <T> void declareValuesSourceFields(AbstractObjectParser<? extends SingleGroupSource, T> parser) {
         // either script or field
         parser.declareString(optionalConstructorArg(), FIELD);
     }
@@ -109,7 +107,7 @@ public abstract class SingleGroupSource<AB extends SingleGroupSource<AB>> implem
             return false;
         }
 
-        final SingleGroupSource<?> that = (SingleGroupSource<?>) other;
+        final SingleGroupSource that = (SingleGroupSource) other;
 
         return Objects.equals(this.field, that.field);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java
index 8c18e43be07..d4585a611b3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java
@@ -15,7 +15,7 @@ import java.io.IOException;
 /*
  * A terms aggregation source for group_by
  */
-public class TermsGroupSource extends SingleGroupSource<TermsGroupSource> {
+public class TermsGroupSource extends SingleGroupSource {
     private static final String NAME = "data_frame_terms_group";
 
     private static final ConstructingObjectParser<TermsGroupSource, Void> STRICT_PARSER = createParser(false);
@@ -27,7 +27,7 @@ public class TermsGroupSource extends SingleGroupSource<TermsGroupSource> {
             return new TermsGroupSource(field);
         });
 
-        SingleGroupSource.declareValuesSourceFields(parser, null);
+        SingleGroupSource.declareValuesSourceFields(parser);
         return parser;
     }
 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java
index e503d887cf3..f7b95525842 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java
@@ -29,14 +29,14 @@ public class GroupConfigTests extends AbstractSerializingTestCase<GroupConfig> {
 
     public static GroupConfig randomGroupConfig() {
         Map<String, Object> source = new LinkedHashMap<>();
-        Map<String, SingleGroupSource<?>> groups = new LinkedHashMap<>();
+        Map<String, SingleGroupSource> groups = new LinkedHashMap<>();
 
         // ensure that the unlikely does not happen: 2 group_by's share the same name
         Set<String> names = new HashSet<>();
         for (int i = 0; i < randomIntBetween(1, 20); ++i) {
             String targetFieldName = randomAlphaOfLengthBetween(1, 20);
             if (names.add(targetFieldName)) {
-                SingleGroupSource<?> groupBy;
+                SingleGroupSource groupBy;
                 Type type = randomFrom(SingleGroupSource.Type.values());
                 switch (type) {
                 case TERMS:
@@ -88,7 +88,7 @@ public class GroupConfigTests extends AbstractSerializingTestCase<GroupConfig> {
         }
     }
 
-    private static Map<String, Object> getSource(SingleGroupSource<?> groupSource) {
+    private static Map<String, Object> getSource(SingleGroupSource groupSource) {
         try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
             XContentBuilder content = groupSource.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
             return XContentHelper.convertToMap(BytesReference.bytes(content), true, XContentType.JSON).v2();
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java
index bbdd8a6dee8..00bc15b1db6 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java
@@ -28,8 +28,6 @@ import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsActio
 import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig;
 import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex;
 
-import java.io.IOException;
-
 import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE;
 
 
@@ -62,7 +60,7 @@ public class TransportGetDataFrameTransformsAction extends AbstractTransportGetR
     }
 
     @Override
-    protected DataFrameTransformConfig parse(XContentParser parser) throws IOException {
+    protected DataFrameTransformConfig parse(XContentParser parser) {
         return DataFrameTransformConfig.fromXContent(parser, null, true);
     }
 
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java
index b65830f72e7..b5642310df3 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java
@@ -85,9 +85,8 @@ public class TransportPreviewDataFrameTransformAction extends
                             DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId();
                             // remove all internal fields
                             List<Map<String, Object>> results = pivot.extractResults(agg, deducedMappings, stats)
-                                    .map(record -> {
+                                    .peek(record -> {
                                         record.keySet().removeIf(k -> k.startsWith("_"));
-                                        return record;
                                     }).collect(Collectors.toList());
                             listener.onResponse(results);
                         },
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java
index 0a0a50761f0..f8405d37057 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java
@@ -7,20 +7,13 @@
 package org.elasticsearch.xpack.dataframe.checkpoint;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
 
-import java.io.IOException;
-
-public class CheckpointException extends ElasticsearchException {
-    public CheckpointException(String msg, Object... params) {
+class CheckpointException extends ElasticsearchException {
+    CheckpointException(String msg, Object... params) {
         super(msg, null, params);
     }
 
-    public CheckpointException(String msg, Throwable cause, Object... params) {
+    CheckpointException(String msg, Throwable cause, Object... params) {
         super(msg, cause, params);
     }
-
-    public CheckpointException(StreamInput in) throws IOException {
-        super(in);
-    }
 }
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
index 4088863a895..b8ceb2e7bd4 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java
@@ -240,7 +240,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
     public synchronized void triggered(Event event) {
         //  for now no rerun, so only trigger if checkpoint == 0
         if (currentCheckpoint.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) {
-            logger.debug("Data frame indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]");
+            logger.debug("Data frame indexer [{}] schedule has triggered, state: [{}]", event.getJobName(), indexer.getState());
             indexer.maybeTriggerAsyncJob(System.currentTimeMillis());
         }
     }
@@ -336,7 +336,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S
         @Override
         public synchronized boolean maybeTriggerAsyncJob(long now) {
             if (taskState.get() == DataFrameTransformTaskState.FAILED) {
-                logger.debug("Schedule was triggered for transform [" + getJobId() + "] but task is failed.  Ignoring trigger.");
+                logger.debug("Schedule was triggered for transform [{}] but task is failed. Ignoring trigger.", getJobId());
                 return false;
             }
 
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java
index deb4afdb73d..95798e4c564 100644
--- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java
+++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java
@@ -133,10 +133,11 @@ public final class SchemaUtil {
             String sourceMapping = sourceFieldName == null ? null : sourceMappings.get(sourceFieldName);
             String destinationMapping = Aggregations.resolveTargetMapping(aggregationName, sourceMapping);
 
-            logger.debug(
-                    "Deduced mapping for: [" + targetFieldName + "], agg type [" + aggregationName + "] to [" + destinationMapping + "]");
+            logger.debug("Deduced mapping for: [{}], agg type [{}] to [{}]",
+                    targetFieldName, aggregationName, destinationMapping);
+
             if (Aggregations.isDynamicMapping(destinationMapping)) {
-                logger.info("Dynamic target mapping set for field ["+ targetFieldName +"] and aggregation [" + aggregationName +"]");
+                logger.debug("Dynamic target mapping set for field [{}] and aggregation [{}]", targetFieldName, aggregationName);
             } else if (destinationMapping != null) {
                 targetMapping.put(targetFieldName, destinationMapping);
             } else {
@@ -146,8 +147,7 @@ public final class SchemaUtil {
 
         fieldNamesForGrouping.forEach((targetFieldName, sourceFieldName) -> {
             String destinationMapping = sourceMappings.get(sourceFieldName);
-            logger.debug(
-                    "Deduced mapping for: [" + targetFieldName + "] to [" + destinationMapping + "]");
+            logger.debug("Deduced mapping for: [{}] to [{}]", targetFieldName, destinationMapping);
             if (destinationMapping != null) {
                 targetMapping.put(targetFieldName, destinationMapping);
             } else {
@@ -187,7 +187,9 @@ public final class SchemaUtil {
                             final Map<?, ?> map = (Map<?, ?>) typeMap;
                             if (map.containsKey("type")) {
                                 String type = map.get("type").toString();
-                                logger.debug("Extracted type for [" + fieldName + "] : [" + type + "] from index [" + indexName +"]");
+                                if (logger.isTraceEnabled()) {
+                                    logger.trace("Extracted type for [" + fieldName + "] : [" + type + "] from index [" + indexName + "]");
+                                }
                                 // TODO: overwrites types, requires resolve if
                                 // types are mixed
                                 extractedTypes.put(fieldName, type);

From 9fd5237fd453db6d696e642d1f9a2f595f81bf03 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Wed, 17 Apr 2019 16:10:53 +0200
Subject: [PATCH 060/112] Clean up Node#close. (#39317) (#41301)

`Node#close` is pretty hard to rely on today:
 - it might swallow exceptions
 - it waits for 10 seconds for threads to terminate but doesn't signal anything
   if threads are still not terminated after 10 seconds

This commit propagates `IOException`s and splits `Node#close` into
`Node#close` and `Node#awaitClose`, so that the decision about what to do
when a node takes too long to close can be made on top of `Node#close`.

It also adds synchronization to lifecycle transitions to make them atomic. I
don't think it is a source of problems today, but it makes things easier to
reason about.
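
For illustration, here is a minimal sketch of how a caller can drive the
split API, modeled on the `Bootstrap` and test-cluster call sites in the
diff below; the `node` parameter and the 10 second timeout are placeholders:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    import org.elasticsearch.node.Node;

    final class NodeShutdownSketch {
        // close() starts the shutdown and may now throw IOException instead of
        // swallowing it; awaitClose() lets the caller decide what a timeout means.
        static void shutdown(Node node) throws IOException {
            node.close();
            try {
                if (node.awaitClose(10, TimeUnit.SECONDS) == false) {
                    throw new IllegalStateException("Node didn't stop within 10 seconds.");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }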
---
 .../elasticsearch/bootstrap/Bootstrap.java    | 14 +++
 .../component/AbstractLifecycleComponent.java | 88 ++++++++---------
 .../common/component/Lifecycle.java           | 25 +++--
 .../common/util/concurrent/ThreadContext.java | 13 ++-
 .../elasticsearch/indices/IndicesService.java | 16 ++++
 .../java/org/elasticsearch/node/Node.java     | 51 ++++++----
 .../org/elasticsearch/node/NodeService.java   |  9 ++
 .../elasticsearch/transport/TcpTransport.java |  1 +
 .../transport/TransportKeepAlive.java         |  6 +-
 .../indices/IndicesServiceCloseTests.java     |  8 ++
 .../org/elasticsearch/node/NodeTests.java     | 94 +++++++++++++++++++
 .../search/SearchServiceTests.java            | 23 ++---
 .../test/ESSingleNodeTestCase.java            |  8 +-
 .../test/InternalTestCluster.java             |  7 ++
 14 files changed, 278 insertions(+), 85 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
index f9d6090f3f6..e1ca9d7d2f9 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
@@ -61,6 +61,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Internal startup code.
@@ -185,8 +186,15 @@ final class Bootstrap {
                         IOUtils.close(node, spawner);
                         LoggerContext context = (LoggerContext) LogManager.getContext(false);
                         Configurator.shutdown(context);
+                        if (node != null && node.awaitClose(10, TimeUnit.SECONDS) == false) {
+                            throw new IllegalStateException("Node didn't stop within 10 seconds. " +
+                                    "Any outstanding requests or tasks might get killed.");
+                        }
                     } catch (IOException ex) {
                         throw new ElasticsearchException("failed to stop node", ex);
+                    } catch (InterruptedException e) {
+                        LogManager.getLogger(Bootstrap.class).warn("Thread got interrupted while waiting for the node to shutdown.");
+                        Thread.currentThread().interrupt();
                     }
                 }
             });
@@ -269,6 +277,12 @@ final class Bootstrap {
     static void stop() throws IOException {
         try {
             IOUtils.close(INSTANCE.node, INSTANCE.spawner);
+            if (INSTANCE.node != null && INSTANCE.node.awaitClose(10, TimeUnit.SECONDS) == false) {
+                throw new IllegalStateException("Node didn't stop within 10 seconds. Any outstanding requests or tasks might get killed.");
+            }
+        } catch (InterruptedException e) {
+            LogManager.getLogger(Bootstrap.class).warn("Thread got interrupted while waiting for the node to shutdown.");
+            Thread.currentThread().interrupt();
         } finally {
             INSTANCE.keepAliveLatch.countDown();
         }
diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
index 772d2d89cf5..a7f72c63091 100644
--- a/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
+++ b/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
@@ -19,15 +19,12 @@
 
 package org.elasticsearch.common.component;
 
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.List;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 public abstract class AbstractLifecycleComponent implements LifecycleComponent {
-    private static final Logger logger = LogManager.getLogger(AbstractLifecycleComponent.class);
 
     protected final Lifecycle lifecycle = new Lifecycle();
 
@@ -52,16 +49,18 @@ public abstract class AbstractLifecycleComponent implements LifecycleComponent {
 
     @Override
     public void start() {
-        if (!lifecycle.canMoveToStarted()) {
-            return;
-        }
-        for (LifecycleListener listener : listeners) {
-            listener.beforeStart();
-        }
-        doStart();
-        lifecycle.moveToStarted();
-        for (LifecycleListener listener : listeners) {
-            listener.afterStart();
+        synchronized (lifecycle) {
+            if (!lifecycle.canMoveToStarted()) {
+                return;
+            }
+            for (LifecycleListener listener : listeners) {
+                listener.beforeStart();
+            }
+            doStart();
+            lifecycle.moveToStarted();
+            for (LifecycleListener listener : listeners) {
+                listener.afterStart();
+            }
         }
     }
 
@@ -69,16 +68,18 @@ public abstract class AbstractLifecycleComponent implements LifecycleComponent {
 
     @Override
     public void stop() {
-        if (!lifecycle.canMoveToStopped()) {
-            return;
-        }
-        for (LifecycleListener listener : listeners) {
-            listener.beforeStop();
-        }
-        lifecycle.moveToStopped();
-        doStop();
-        for (LifecycleListener listener : listeners) {
-            listener.afterStop();
+        synchronized (lifecycle) {
+            if (!lifecycle.canMoveToStopped()) {
+                return;
+            }
+            for (LifecycleListener listener : listeners) {
+                listener.beforeStop();
+            }
+            lifecycle.moveToStopped();
+            doStop();
+            for (LifecycleListener listener : listeners) {
+                listener.afterStop();
+            }
         }
     }
 
@@ -86,25 +87,26 @@ public abstract class AbstractLifecycleComponent implements LifecycleComponent {
 
     @Override
     public void close() {
-        if (lifecycle.started()) {
-            stop();
-        }
-        if (!lifecycle.canMoveToClosed()) {
-            return;
-        }
-        for (LifecycleListener listener : listeners) {
-            listener.beforeClose();
-        }
-        lifecycle.moveToClosed();
-        try {
-            doClose();
-        } catch (IOException e) {
-            // TODO: we need to separate out closing (ie shutting down) services, vs releasing runtime transient
-            // structures. Shutting down services should use IOUtils.close
-            logger.warn("failed to close " + getClass().getName(), e);
-        }
-        for (LifecycleListener listener : listeners) {
-            listener.afterClose();
+        synchronized (lifecycle) {
+            if (lifecycle.started()) {
+                stop();
+            }
+            if (!lifecycle.canMoveToClosed()) {
+                return;
+            }
+            for (LifecycleListener listener : listeners) {
+                listener.beforeClose();
+            }
+            lifecycle.moveToClosed();
+            try {
+                doClose();
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            } finally {
+                for (LifecycleListener listener : listeners) {
+                    listener.afterClose();
+                }
+            }
         }
     }
 
diff --git a/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java
index e71c9b03899..82042ab2b7d 100644
--- a/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java
+++ b/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java
@@ -39,15 +39,22 @@ package org.elasticsearch.common.component;
  * }
  * </pre>
  * <p>
+ * NOTE: The Lifecycle class is thread-safe. It is also possible to prevent concurrent state transitions
+ * by locking on the Lifecycle object itself. This is typically useful when chaining multiple transitions.
+ * <p>
  * Note, closed is only allowed to be called when stopped, so make sure to stop the component first.
- * Here is how the logic can be applied:
+ * Here is how the logic can be applied. A lock of the {@code lifecycleState} object is taken so that
+ * another thread cannot move the state from {@code STOPPED} to {@code STARTED} before it has moved to
+ * {@code CLOSED}.
  * <pre>
  * public void close() {
- *  if (lifecycleState.started()) {
- *      stop();
- *  }
- *  if (!lifecycleState.moveToClosed()) {
- *      return;
+ *  synchronized (lifecycleState) {
+ *      if (lifecycleState.started()) {
+ *          stop();
+ *      }
+ *      if (!lifecycleState.moveToClosed()) {
+ *          return;
+ *      }
  *  }
  *  // perform close logic here
  * }
@@ -116,7 +123,7 @@ public class Lifecycle {
     }
 
 
-    public boolean moveToStarted() throws IllegalStateException {
+    public synchronized boolean moveToStarted() throws IllegalStateException {
         State localState = this.state;
         if (localState == State.INITIALIZED || localState == State.STOPPED) {
             state = State.STARTED;
@@ -145,7 +152,7 @@ public class Lifecycle {
         throw new IllegalStateException("Can't move to stopped with unknown state");
     }
 
-    public boolean moveToStopped() throws IllegalStateException {
+    public synchronized boolean moveToStopped() throws IllegalStateException {
         State localState = state;
         if (localState == State.STARTED) {
             state = State.STOPPED;
@@ -171,7 +178,7 @@ public class Lifecycle {
         return true;
     }
 
-    public boolean moveToClosed() throws IllegalStateException {
+    public synchronized boolean moveToClosed() throws IllegalStateException {
         State localState = state;
         if (localState == State.CLOSED) {
             return false;
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
index 01e23843777..80a9a30032e 100644
--- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
+++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
@@ -131,7 +131,18 @@ public final class ThreadContext implements Closeable, Writeable {
     public StoredContext stashContext() {
         final ThreadContextStruct context = threadLocal.get();
         threadLocal.set(null);
-        return () -> threadLocal.set(context);
+        return () -> {
+            // If the node and thus the threadLocal get closed while this task
+            // is still executing, we don't want this runnable to fail with an
+            // uncaught exception
+            try {
+                threadLocal.set(context);
+            } catch (IllegalStateException e) {
+                if (isClosed() == false) {
+                    throw e;
+                }
+            }
+        };
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
index 82570e32947..e07a83e2031 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -23,6 +23,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader.CacheHelper;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.CollectionUtil;
@@ -201,6 +202,7 @@ public class IndicesService extends AbstractLifecycleComponent
     private final Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders;
     private final Map<String, Function<IndexSettings, IndexStore>> indexStoreFactories;
     final AbstractRefCounted indicesRefCount; // pkg-private for testing
+    private final CountDownLatch closeLatch = new CountDownLatch(1);
 
     @Override
     protected void doStart() {
@@ -274,6 +276,8 @@ public class IndicesService extends AbstractLifecycleComponent
                             indicesQueryCache);
                 } catch (IOException e) {
                     throw new UncheckedIOException(e);
+                } finally {
+                    closeLatch.countDown();
                 }
             }
         };
@@ -312,6 +316,18 @@ public class IndicesService extends AbstractLifecycleComponent
         indicesRefCount.decRef();
     }
 
+    /**
+     * Wait for this {@link IndicesService} to be effectively closed. When this returns {@code true}, all shards and shard stores
+     * are closed and all shard {@link CacheHelper#addClosedListener(org.apache.lucene.index.IndexReader.ClosedListener) closed
+     * listeners} have run. However some {@link IndexEventListener#onStoreClosed(ShardId) shard closed listeners} might not have
+     * run.
+     * @return true if all shards closed within the given timeout, false otherwise
+     * @throws InterruptedException if the current thread got interrupted while waiting for shards to close
+     */
+    public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException {
+        return closeLatch.await(timeout, timeUnit);
+    }
+
     /**
      * Returns the node stats indices stats. The {@code includePrevious} flag controls
      * if old shards stats will be aggregated as well (only for relevant stats, such as
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index 8484be006ec..42f80dbd87c 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -781,11 +781,13 @@ public class Node implements Closeable {
     // In this case the process will be terminated even if the first call to close() has not finished yet.
     @Override
     public synchronized void close() throws IOException {
-        if (lifecycle.started()) {
-            stop();
-        }
-        if (!lifecycle.moveToClosed()) {
-            return;
+        synchronized (lifecycle) {
+            if (lifecycle.started()) {
+                stop();
+            }
+            if (!lifecycle.moveToClosed()) {
+                return;
+            }
         }
 
         logger.info("closing ...");
@@ -833,21 +835,12 @@ public class Node implements Closeable {
         toClose.add(injector.getInstance(ScriptService.class));
 
         toClose.add(() -> stopWatch.stop().start("thread_pool"));
-        // TODO this should really use ThreadPool.terminate()
         toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
-        toClose.add(() -> {
-            try {
-                injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
-            } catch (InterruptedException e) {
-                // ignore
-            }
-        });
-
-        toClose.add(() -> stopWatch.stop().start("thread_pool_force_shutdown"));
-        toClose.add(() -> injector.getInstance(ThreadPool.class).shutdownNow());
+        // Don't call shutdownNow here, it might break ongoing operations on Lucene indices.
+        // See https://issues.apache.org/jira/browse/LUCENE-7248. We call shutdownNow in
+        // awaitClose if the node doesn't finish closing within the specified time.
         toClose.add(() -> stopWatch.stop());
 
-
         toClose.add(injector.getInstance(NodeEnvironment.class));
         toClose.add(injector.getInstance(PageCacheRecycler.class));
 
@@ -858,6 +851,30 @@ public class Node implements Closeable {
         logger.info("closed");
     }
 
+    /**
+     * Wait for this node to be effectively closed.
+     */
+    // synchronized to prevent running concurrently with close()
+    public synchronized boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException {
+        if (lifecycle.closed() == false) {
+            // We don't want to shutdown the threadpool or interrupt threads on a node that is not
+            // closed yet.
+            throw new IllegalStateException("Call close() first");
+        }
+
+
+        ThreadPool threadPool = injector.getInstance(ThreadPool.class);
+        final boolean terminated = ThreadPool.terminate(threadPool, timeout, timeUnit);
+        if (terminated) {
+            // All threads terminated successfully. Because search, recovery and all other operations
+            // that run on shards run in the threadpool, indices should be effectively closed by now.
+            if (nodeService.awaitClose(0, TimeUnit.MILLISECONDS) == false) {
+                throw new IllegalStateException("Some shards are still open after the threadpool terminated. " +
+                        "Something is leaking index readers or store references.");
+            }
+        }
+        return terminated;
+    }
 
     /**
      * Returns {@code true} if the node is closed.
diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java
index 0567641b8a5..3f71a21966c 100644
--- a/server/src/main/java/org/elasticsearch/node/NodeService.java
+++ b/server/src/main/java/org/elasticsearch/node/NodeService.java
@@ -43,6 +43,7 @@ import org.elasticsearch.transport.TransportService;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
 
 public class NodeService implements Closeable {
     private final Settings settings;
@@ -135,4 +136,12 @@ public class NodeService implements Closeable {
         IOUtils.close(indicesService);
     }
 
+    /**
+     * Wait for the node to be effectively closed.
+     * @see IndicesService#awaitClose(long, TimeUnit)
+     */
+    public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException {
+        return indicesService.awaitClose(timeout, timeUnit);
+    }
+
 }
diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
index eb61af8d2a3..42d61301635 100644
--- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -360,6 +360,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
         final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
         closeLock.writeLock().lock();
         try {
+            // No need for locking here since Lifecycle objects can't move from STARTED to INITIALIZED
             if (lifecycle.initialized() == false && lifecycle.started() == false) {
                 throw new IllegalStateException("transport has been stopped");
             }
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java
index 571ced1c118..fc7ebe4b964 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java
@@ -136,8 +136,10 @@ final class TransportKeepAlive implements Closeable {
 
     @Override
     public void close() {
-        lifecycle.moveToStopped();
-        lifecycle.moveToClosed();
+        synchronized (lifecycle) {
+            lifecycle.moveToStopped();
+            lifecycle.moveToClosed();
+        }
     }
 
     private class ScheduledPing extends AbstractLifecycleRunnable {
diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java
index 15b45330530..e22253be7fc 100644
--- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java
@@ -49,6 +49,7 @@ import org.elasticsearch.transport.nio.MockNioTransportPlugin;
 
 import java.nio.file.Path;
 import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
 import java.util.Collections;
 
 import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING;
@@ -95,8 +96,10 @@ public class IndicesServiceCloseTests extends ESTestCase {
         Node node = startNode();
         IndicesService indicesService = node.injector().getInstance(IndicesService.class);
         assertEquals(1, indicesService.indicesRefCount.refCount());
+        assertFalse(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
         node.close();
         assertEquals(0, indicesService.indicesRefCount.refCount());
+        assertTrue(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
     }
 
     public void testCloseNonEmptyIndicesService() throws Exception {
@@ -108,9 +111,11 @@ public class IndicesServiceCloseTests extends ESTestCase {
                 .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)));
 
         assertEquals(2, indicesService.indicesRefCount.refCount());
+        assertFalse(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
 
         node.close();
         assertEquals(0, indicesService.indicesRefCount.refCount());
+        assertTrue(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
     }
 
     public void testCloseWithIncedRefStore() throws Exception {
@@ -126,12 +131,15 @@ public class IndicesServiceCloseTests extends ESTestCase {
         IndexService indexService = indicesService.iterator().next();
         IndexShard shard = indexService.getShard(0);
         shard.store().incRef();
+        assertFalse(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
 
         node.close();
         assertEquals(1, indicesService.indicesRefCount.refCount());
+        assertFalse(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
 
         shard.store().decRef();
         assertEquals(0, indicesService.indicesRefCount.refCount());
+        assertTrue(indicesService.awaitClose(0, TimeUnit.MILLISECONDS));
     }
 
     public void testCloseWhileOngoingRequest() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java
index 288817d5c77..6f0419421b8 100644
--- a/server/src/test/java/org/elasticsearch/node/NodeTests.java
+++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java
@@ -26,18 +26,30 @@ import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.Engine.Searcher;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.MockHttpTransport;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.Matchers;
 
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
 @LuceneTestCase.SuppressFileSystems(value = "ExtrasFS")
 public class NodeTests extends ESTestCase {
 
@@ -136,5 +148,87 @@ public class NodeTests extends ESTestCase {
                 .put(Node.NODE_DATA_SETTING.getKey(), true);
     }
 
+    public void testCloseOnOutstandingTask() throws Exception {
+        Node node = new MockNode(baseSettings().build(), basePlugins());
+        node.start();
+        ThreadPool threadpool = node.injector().getInstance(ThreadPool.class);
+        AtomicBoolean shouldRun = new AtomicBoolean(true);
+        threadpool.executor(ThreadPool.Names.SEARCH).execute(() -> {
+            while (shouldRun.get());
+        });
+        node.close();
+        shouldRun.set(false);
+        assertTrue(node.awaitClose(1, TimeUnit.DAYS));
+    }
 
+    public void testAwaitCloseTimeoutsOnNonInterruptibleTask() throws Exception {
+        Node node = new MockNode(baseSettings().build(), basePlugins());
+        node.start();
+        ThreadPool threadpool = node.injector().getInstance(ThreadPool.class);
+        AtomicBoolean shouldRun = new AtomicBoolean(true);
+        threadpool.executor(ThreadPool.Names.SEARCH).execute(() -> {
+            while (shouldRun.get());
+        });
+        node.close();
+        assertFalse(node.awaitClose(0, TimeUnit.MILLISECONDS));
+        shouldRun.set(false);
+    }
+
+    public void testCloseOnInterruptibleTask() throws Exception {
+        Node node = new MockNode(baseSettings().build(), basePlugins());
+        node.start();
+        ThreadPool threadpool = node.injector().getInstance(ThreadPool.class);
+        CountDownLatch latch = new CountDownLatch(1);
+        final CountDownLatch finishLatch = new CountDownLatch(1);
+        final AtomicBoolean interrupted = new AtomicBoolean(false);
+        threadpool.executor(ThreadPool.Names.SEARCH).execute(() -> {
+            try {
+                latch.await();
+            } catch (InterruptedException e) {
+                interrupted.set(true);
+                Thread.currentThread().interrupt();
+            } finally {
+                finishLatch.countDown();
+            }
+        });
+        node.close();
+        // close should not interrupt ongoing tasks
+        assertFalse(interrupted.get());
+        // but awaitClose should
+        node.awaitClose(0, TimeUnit.SECONDS);
+        finishLatch.await();
+        assertTrue(interrupted.get());
+    }
+
+    public void testCloseOnLeakedIndexReaderReference() throws Exception {
+        Node node = new MockNode(baseSettings().build(), basePlugins());
+        node.start();
+        IndicesService indicesService = node.injector().getInstance(IndicesService.class);
+        assertAcked(node.client().admin().indices().prepareCreate("test")
+                .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)));
+        IndexService indexService = indicesService.iterator().next();
+        IndexShard shard = indexService.getShard(0);
+        Searcher searcher = shard.acquireSearcher("test");
+        node.close();
+
+        IllegalStateException e = expectThrows(IllegalStateException.class, () -> node.awaitClose(1, TimeUnit.DAYS));
+        searcher.close();
+        assertThat(e.getMessage(), Matchers.containsString("Something is leaking index readers or store references"));
+    }
+
+    public void testCloseOnLeakedStoreReference() throws Exception {
+        Node node = new MockNode(baseSettings().build(), basePlugins());
+        node.start();
+        IndicesService indicesService = node.injector().getInstance(IndicesService.class);
+        assertAcked(node.client().admin().indices().prepareCreate("test")
+                .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)));
+        IndexService indexService = indicesService.iterator().next();
+        IndexShard shard = indexService.getShard(0);
+        shard.store().incRef();
+        node.close();
+
+        IllegalStateException e = expectThrows(IllegalStateException.class, () -> node.awaitClose(1, TimeUnit.DAYS));
+        shard.store().decRef();
+        assertThat(e.getMessage(), Matchers.containsString("Something is leaking index readers or store references"));
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
index 641f1a1c19b..90957c2779e 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -652,16 +652,17 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
         searchRequest.allowPartialSearchResults(randomBoolean());
         ShardSearchTransportRequest request = new ShardSearchTransportRequest(OriginalIndices.NONE, searchRequest, shardId,
             indexService.numberOfShards(), AliasFilter.EMPTY, 1f, nowInMillis, clusterAlias, Strings.EMPTY_ARRAY);
-        DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()));
-        SearchShardTarget searchShardTarget = searchContext.shardTarget();
-        QueryShardContext queryShardContext = searchContext.getQueryShardContext();
-        String expectedIndexName = clusterAlias == null ? index : clusterAlias + ":" + index;
-        assertEquals(expectedIndexName, queryShardContext.getFullyQualifiedIndex().getName());
-        assertEquals(expectedIndexName, searchShardTarget.getFullyQualifiedIndexName());
-        assertEquals(clusterAlias, searchShardTarget.getClusterAlias());
-        assertEquals(shardId, searchShardTarget.getShardId());
-        assertSame(searchShardTarget, searchContext.dfsResult().getSearchShardTarget());
-        assertSame(searchShardTarget, searchContext.queryResult().getSearchShardTarget());
-        assertSame(searchShardTarget, searchContext.fetchResult().getSearchShardTarget());
+        try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) {
+            SearchShardTarget searchShardTarget = searchContext.shardTarget();
+            QueryShardContext queryShardContext = searchContext.getQueryShardContext();
+            String expectedIndexName = clusterAlias == null ? index : clusterAlias + ":" + index;
+            assertEquals(expectedIndexName, queryShardContext.getFullyQualifiedIndex().getName());
+            assertEquals(expectedIndexName, searchShardTarget.getFullyQualifiedIndexName());
+            assertEquals(clusterAlias, searchShardTarget.getClusterAlias());
+            assertEquals(shardId, searchShardTarget.getShardId());
+            assertSame(searchShardTarget, searchContext.dfsResult().getSearchShardTarget());
+            assertSame(searchShardTarget, searchContext.queryResult().getSearchShardTarget());
+            assertSame(searchShardTarget, searchContext.fetchResult().getSearchShardTarget());
+        }
     }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index 4d8f9fed51b..621f303c983 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -60,6 +60,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING;
 import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING;
@@ -99,10 +100,13 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
             ).get();
     }
 
-    private static void stopNode() throws IOException {
+    private static void stopNode() throws IOException, InterruptedException {
         Node node = NODE;
         NODE = null;
         IOUtils.close(node);
+        if (node != null && node.awaitClose(10, TimeUnit.SECONDS) == false) {
+            throw new AssertionError("Node couldn't close within 10 seconds.");
+        }
     }
 
     @Override
@@ -144,7 +148,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
     }
 
     @AfterClass
-    public static void tearDownClass() throws IOException {
+    public static void tearDownClass() throws Exception {
         stopNode();
     }
 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 4d531d57cef..c41a0fdcbef 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -1049,6 +1049,13 @@ public final class InternalTestCluster extends TestCluster {
                 closed.set(true);
                 markNodeDataDirsAsPendingForWipe(node);
                 node.close();
+                try {
+                    if (node.awaitClose(10, TimeUnit.SECONDS) == false) {
+                        throw new IOException("Node didn't close within 10 seconds.");
+                    }
+                } catch (InterruptedException e) {
+                    throw new AssertionError("Interruption while waiting for the node to close", e);
+                }
             }
         }
 

From f7e590ce0d7a2b2c17eb642cee78e31d74ef60b0 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Wed, 17 Apr 2019 16:11:14 +0200
Subject: [PATCH 061/112] ProfileScorer should propagate
 `setMinCompetitiveScore`. (#40958) (#41302)

Currently enabling profiling disables top-hits optimizations, which is
unfortunate: it would be nice to be able to notice the difference in method
counts and timings depending on whether total hit counts are requested.
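
Schematically, the fix makes the profiling wrapper forward the hint to the
wrapped scorer while timing the call. The sketch below uses hypothetical,
simplified types purely for illustration; the real change is in
`ProfileScorer` in the diff that follows:

    // Hypothetical minimal scorer interface, for illustration only.
    interface SimpleScorer {
        float score();
        void setMinCompetitiveScore(float minScore);
    }

    final class ProfilingScorerSketch implements SimpleScorer {
        private final SimpleScorer in;
        private long setMinCompetitiveScoreNanos; // accumulated like a profile Timer

        ProfilingScorerSketch(SimpleScorer in) {
            this.in = in;
        }

        @Override
        public float score() {
            return in.score();
        }

        @Override
        public void setMinCompetitiveScore(float minScore) {
            long start = System.nanoTime();
            try {
                in.setMinCompetitiveScore(minScore); // forward instead of dropping the hint
            } finally {
                setMinCompetitiveScoreNanos += System.nanoTime() - start;
            }
        }
    }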
---
 docs/reference/search/profile.asciidoc        | 24 +++--
 .../search/profile/query/ProfileScorer.java   | 15 ++-
 .../search/profile/query/QueryTimingType.java |  3 +-
 .../profile/query/ProfileScorerTests.java     | 92 +++++++++++++++++++
 4 files changed, 125 insertions(+), 9 deletions(-)
 create mode 100644 server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java

diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc
index 6ac5c6bcaf0..9a6779defab 100644
--- a/docs/reference/search/profile.asciidoc
+++ b/docs/reference/search/profile.asciidoc
@@ -82,7 +82,9 @@ This will yield the following result:
                           "compute_max_score": 0,
                           "compute_max_score_count": 0,
                           "shallow_advance": 0,
-                          "shallow_advance_count": 0
+                          "shallow_advance_count": 0,
+                          "set_min_competitive_score": 0,
+                          "set_min_competitive_score_count": 0
                        },
                        "children": [
                           {
@@ -105,7 +107,9 @@ This will yield the following result:
                                 "compute_max_score": 0,
                                 "compute_max_score_count": 0,
                                 "shallow_advance": 0,
-                                "shallow_advance_count": 0
+                                "shallow_advance_count": 0,
+                                "set_min_competitive_score": 0,
+                                "set_min_competitive_score_count": 0
                              }
                           },
                           {
@@ -128,7 +132,9 @@ This will yield the following result:
                                 "compute_max_score": 0,
                                 "compute_max_score_count": 0,
                                 "shallow_advance": 0,
-                                "shallow_advance_count": 0
+                                "shallow_advance_count": 0,
+                                "set_min_competitive_score": 0,
+                                "set_min_competitive_score_count": 0
                              }
                           }
                        ]
@@ -311,7 +317,9 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen
    "compute_max_score": 0,
    "compute_max_score_count": 0,
    "shallow_advance": 0,
-   "shallow_advance_count": 0
+   "shallow_advance_count": 0,
+   "set_min_competitive_score": 0,
+   "set_min_competitive_score_count": 0
 }
 --------------------------------------------------
 // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/]
@@ -575,7 +583,9 @@ And the response:
                               "compute_max_score": 0,
                               "compute_max_score_count": 0,
                               "shallow_advance": 0,
-                              "shallow_advance_count": 0
+                              "shallow_advance_count": 0,
+                              "set_min_competitive_score": 0,
+                              "set_min_competitive_score_count": 0
                            }
                         },
                         {
@@ -598,7 +608,9 @@ And the response:
                               "compute_max_score": 0,
                               "compute_max_score_count": 0,
                               "shallow_advance": 0,
-                              "shallow_advance_count": 0
+                              "shallow_advance_count": 0,
+                              "set_min_competitive_score": 0,
+                              "set_min_competitive_score_count": 0
                            }
                         }
                      ],
diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java
index ab8fb5dbcae..aa4af9822e4 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java
@@ -39,10 +39,10 @@ final class ProfileScorer extends Scorer {
     private final Scorer scorer;
     private ProfileWeight profileWeight;
 
-    private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer;
+    private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer,
+        setMinCompetitiveScoreTimer;
     private final boolean isConstantScoreQuery;
 
-
     ProfileScorer(ProfileWeight w, Scorer scorer, QueryProfileBreakdown profile) throws IOException {
         super(w);
         this.scorer = scorer;
@@ -53,6 +53,7 @@ final class ProfileScorer extends Scorer {
         matchTimer = profile.getTimer(QueryTimingType.MATCH);
         shallowAdvanceTimer = profile.getTimer(QueryTimingType.SHALLOW_ADVANCE);
         computeMaxScoreTimer = profile.getTimer(QueryTimingType.COMPUTE_MAX_SCORE);
+        setMinCompetitiveScoreTimer = profile.getTimer(QueryTimingType.SET_MIN_COMPETITIVE_SCORE);
         ProfileScorer profileScorer = null;
         if (w.getQuery() instanceof ConstantScoreQuery && scorer instanceof ProfileScorer) {
             //Case when we have a totalHits query and it is not cached
@@ -219,4 +220,14 @@ final class ProfileScorer extends Scorer {
             computeMaxScoreTimer.stop();
         }
     }
+
+    @Override
+    public void setMinCompetitiveScore(float minScore) throws IOException {
+        setMinCompetitiveScoreTimer.start();
+        try {
+            scorer.setMinCompetitiveScore(minScore);
+        } finally {
+            setMinCompetitiveScoreTimer.stop();
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java
index 146bd8f07bc..aecc41d8a23 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java
@@ -29,7 +29,8 @@ public enum QueryTimingType {
     MATCH,
     SCORE,
     SHALLOW_ADVANCE,
-    COMPUTE_MAX_SCORE;
+    COMPUTE_MAX_SCORE,
+    SET_MIN_COMPETITIVE_SCORE;
 
     @Override
     public String toString() {
diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java
new file mode 100644
index 00000000000..fd72bdfa6de
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.query;
+
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class ProfileScorerTests extends ESTestCase {
+
+    private static class FakeScorer extends Scorer {
+
+        public float maxScore, minCompetitiveScore;
+
+        protected FakeScorer(Weight weight) {
+            super(weight);
+        }
+
+        @Override
+        public DocIdSetIterator iterator() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public float getMaxScore(int upTo) throws IOException {
+            return maxScore;
+        }
+
+        @Override
+        public float score() throws IOException {
+            return 1f;
+        }
+
+        @Override
+        public int docID() {
+            throw new UnsupportedOperationException();
+        }
+        
+        @Override
+        public void setMinCompetitiveScore(float minScore) {
+            this.minCompetitiveScore = minScore;
+        }
+    }
+
+    public void testPropagateMinCompetitiveScore() throws IOException {
+        Query query = new MatchAllDocsQuery();
+        Weight weight = query.createWeight(new IndexSearcher(new MultiReader()), ScoreMode.TOP_SCORES, 1f);
+        FakeScorer fakeScorer = new FakeScorer(weight);
+        QueryProfileBreakdown profile = new QueryProfileBreakdown();
+        ProfileWeight profileWeight = new ProfileWeight(query, weight, profile);
+        ProfileScorer profileScorer = new ProfileScorer(profileWeight, fakeScorer, profile);
+        profileScorer.setMinCompetitiveScore(0.42f);
+        assertEquals(0.42f, fakeScorer.minCompetitiveScore, 0f);
+    }
+
+    public void testPropagateMaxScore() throws IOException {
+        Query query = new MatchAllDocsQuery();
+        Weight weight = query.createWeight(new IndexSearcher(new MultiReader()), ScoreMode.TOP_SCORES, 1f);
+        FakeScorer fakeScorer = new FakeScorer(weight);
+        QueryProfileBreakdown profile = new QueryProfileBreakdown();
+        ProfileWeight profileWeight = new ProfileWeight(query, weight, profile);
+        ProfileScorer profileScorer = new ProfileScorer(profileWeight, fakeScorer, profile);
+        profileScorer.setMinCompetitiveScore(0.42f);
+        fakeScorer.maxScore = 42f;
+        assertEquals(42f, profileScorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS), 0f);
+    }
+}

From aa0c957a4a3ba611db9b9fd494e56345a4ed0936 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen <nhat.nguyen@elastic.co>
Date: Thu, 11 Apr 2019 15:15:00 -0400
Subject: [PATCH 062/112] Do not trim unsafe commits when open readonly engine
 (#41041)

Today we always trim unsafe commits (whose max_seq_no >= global
checkpoint) before starting a read-write or read-only engine. This is
mandatory for read-write engines because they must start with the safe
commit. This is also fine for read-only engines since in most cases we
should have exactly one commit after closing an index (trimming is a
noop). However, this is dangerous for following indices, which might have
more than one commit when they are being closed.

With this change, we move the trimming logic to the ctor of InternalEngine
so we won't trim anything if we are going to open a read-only engine.
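
For intuition, here is a rough sketch (not the actual Store or
CombinedDeletionPolicy code) of the trimming decision described above: among
the commit points of a shard, the newest one whose max_seq_no is at or below
the global checkpoint is the safe commit, and the commits after it are the
unsafe ones that get trimmed. All names below are illustrative:

    import java.util.List;

    final class TrimUnsafeCommitsSketch {

        // Illustrative stand-in for an index commit that records its max_seq_no.
        static final class CommitPoint {
            final long maxSeqNo;
            CommitPoint(long maxSeqNo) {
                this.maxSeqNo = maxSeqNo;
            }
        }

        // Commits are assumed ordered from oldest to newest. The returned index is
        // the safe commit; a read-write engine deletes everything after it before
        // opening, while a read-only engine now leaves the commits untouched.
        static int safeCommitIndex(List<CommitPoint> commits, long globalCheckpoint) {
            int safe = 0;
            for (int i = 0; i < commits.size(); i++) {
                if (commits.get(i).maxSeqNo <= globalCheckpoint) {
                    safe = i;
                }
            }
            return safe;
        }
    }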
---
 .../index/engine/InternalEngine.java          | 13 ++++-
 .../index/engine/ReadOnlyEngine.java          | 47 ++++++++++---------
 .../elasticsearch/index/shard/IndexShard.java | 17 +------
 .../index/engine/InternalEngineTests.java     | 33 -------------
 .../index/engine/ReadOnlyEngineTests.java     | 21 ++-------
 .../index/shard/IndexShardTests.java          | 38 +++++++++++++++
 6 files changed, 80 insertions(+), 89 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 86dea798dc2..d6558a8dd37 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -78,6 +78,7 @@ import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.index.translog.TranslogCorruptedException;
@@ -87,6 +88,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -185,10 +187,10 @@ public class InternalEngine extends Engine {
         boolean success = false;
         try {
             this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
-
             mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
             throttle = new IndexThrottle();
             try {
+                trimUnsafeCommits(engineConfig);
                 translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier());
                 assert translog.getGeneration() != null;
                 this.translog = translog;
@@ -2754,4 +2756,13 @@ public class InternalEngine extends Engine {
         final long maxSeqNo = SequenceNumbers.max(localCheckpointTracker.getMaxSeqNo(), translog.getMaxSeqNo());
         advanceMaxSeqNoOfUpdatesOrDeletes(maxSeqNo);
     }
+
+    private static void trimUnsafeCommits(EngineConfig engineConfig) throws IOException {
+        final Store store = engineConfig.getStore();
+        final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
+        final Path translogPath = engineConfig.getTranslogConfig().getTranslogPath();
+        final long globalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID);
+        final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogPath, translogUUID);
+        store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, engineConfig.getIndexSettings().getIndexVersionCreated());
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index fa09b3529d7..b5618d5b9cb 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -30,7 +30,6 @@ import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.search.SearcherManager;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
-import org.elasticsearch.Assertions;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
@@ -102,23 +101,8 @@ public class ReadOnlyEngine extends Engine {
                 this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory);
                 this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats;
                 if (seqNoStats == null) {
-                    seqNoStats = buildSeqNoStats(lastCommittedSegmentInfos);
-                    // Before 8.0 the global checkpoint is not known and up to date when the engine is created after
-                    // peer recovery, so we only check the max seq no / global checkpoint coherency when the global
-                    // checkpoint is different from the unassigned sequence number value.
-                    // In addition to that we only execute the check if the index the engine belongs to has been
-                    // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
-                    // that guarantee that all operations have been flushed to Lucene.
-                    final long globalCheckpoint = engineConfig.getGlobalCheckpointSupplier().getAsLong();
-                    final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
-                    if (indexVersionCreated.onOrAfter(Version.V_7_1_0) ||
-                        (globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) {
-                        if (seqNoStats.getMaxSeqNo() != globalCheckpoint) {
-                            assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), globalCheckpoint);
-                            throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo()
-                                + "] from last commit does not match global checkpoint [" + globalCheckpoint + "]");
-                        }
-                    }
+                    seqNoStats = buildSeqNoStats(config, lastCommittedSegmentInfos);
+                    ensureMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats);
                 }
                 this.seqNoStats = seqNoStats;
                 this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory);
@@ -138,10 +122,27 @@ public class ReadOnlyEngine extends Engine {
         }
     }
 
-    protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) {
-        if (Assertions.ENABLED) {
-            assert false : "max seq. no. [" + maxSeqNo + "] does not match [" + globalCheckpoint + "]";
+    protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStats) {
+        // Before 8.0 the global checkpoint is not known and up to date when the engine is created after
+        // peer recovery, so we only check the max seq no / global checkpoint coherency when the global
+        // checkpoint is different from the unassigned sequence number value.
+        // In addition to that we only execute the check if the index the engine belongs to has been
+        // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
+        // that guarantee that all operations have been flushed to Lucene.
+        final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
+        if (indexVersionCreated.onOrAfter(Version.V_8_0_0) ||
+            (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) {
+            if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
+                throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo()
+                    + "] from last commit does not match global checkpoint [" + seqNoStats.getGlobalCheckpoint() + "]");
+            }
         }
+        assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint());
+    }
+
+    protected boolean assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) {
+        assert maxSeqNo == globalCheckpoint : "max seq. no. [" + maxSeqNo + "] does not match [" + globalCheckpoint + "]";
+        return true;
     }
 
     @Override
@@ -198,12 +199,12 @@ public class ReadOnlyEngine extends Engine {
         }
     }
 
-    public static SeqNoStats buildSeqNoStats(SegmentInfos infos) {
+    private static SeqNoStats buildSeqNoStats(EngineConfig config, SegmentInfos infos) {
         final SequenceNumbers.CommitInfo seqNoStats =
             SequenceNumbers.loadSeqNoInfoFromLuceneCommit(infos.userData.entrySet());
         long maxSeqNo = seqNoStats.maxSeqNo;
         long localCheckpoint = seqNoStats.localCheckpoint;
-        return new SeqNoStats(maxSeqNo, localCheckpoint, localCheckpoint);
+        return new SeqNoStats(maxSeqNo, localCheckpoint, config.getGlobalCheckpointSupplier().getAsLong());
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index ff1922a231d..f9db0b15e7a 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -1439,7 +1439,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty()
             : "expected empty set of retention leases with recovery source [" + recoveryState.getRecoverySource()
             + "] but got " + getRetentionLeases();
-        trimUnsafeCommits();
         synchronized (mutex) {
             verifyNotClosed();
             assert currentEngineReference.get() == null : "engine is running";
@@ -1458,15 +1457,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage();
     }
 
-    private void trimUnsafeCommits() throws IOException {
-        assert currentEngineReference.get() == null || currentEngineReference.get() instanceof ReadOnlyEngine : "a write engine is running";
-        final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
-        final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
-        final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID);
-        assertMaxUnsafeAutoIdInCommit();
-        store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, indexSettings.getIndexVersionCreated());
-    }
-
     private boolean assertSequenceNumbersInCommit() throws IOException {
         final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
         assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint";
@@ -1474,11 +1464,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contains a history uuid";
         assert userData.get(Engine.HISTORY_UUID_KEY).equals(getHistoryUUID()) : "commit point history uuid ["
             + userData.get(Engine.HISTORY_UUID_KEY) + "] is different than engine [" + getHistoryUUID() + "]";
-        return true;
-    }
-
-    private boolean assertMaxUnsafeAutoIdInCommit() throws IOException {
-        final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
         assert userData.containsKey(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) :
             "opening index which was created post 5.5.0 but " + Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID
                 + " is not found in commit";
@@ -3107,8 +3092,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         Engine newEngine = null;
         try {
             final long globalCheckpoint = getGlobalCheckpoint();
-            trimUnsafeCommits();
             synchronized (mutex) {
+                assert currentEngineReference.get() instanceof ReadOnlyEngine : "another write engine is running";
                 verifyNotClosed();
                 // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata).
                 newEngine = engineFactory.newReadWriteEngine(newEngineConfig());
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index dba3fec9213..da157f58668 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -674,7 +674,6 @@ public class InternalEngineTests extends EngineTestCase {
         InternalEngine engine = createEngine(store, translog);
         engine.close();
 
-        trimUnsafeCommits(engine.config());
         engine = new InternalEngine(engine.config());
         assertTrue(engine.isRecovering());
         engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
@@ -691,7 +690,6 @@ public class InternalEngineTests extends EngineTestCase {
         engine.index(indexForDoc(doc));
         engine.close();
 
-        trimUnsafeCommits(engine.config());
         engine = new InternalEngine(engine.config());
         expectThrows(IllegalStateException.class, () -> engine.flush(true, true));
         assertTrue(engine.isRecovering());
@@ -726,7 +724,6 @@ public class InternalEngineTests extends EngineTestCase {
         } finally {
             IOUtils.close(engine);
         }
-        trimUnsafeCommits(engine.config());
         try (Engine recoveringEngine = new InternalEngine(engine.config())) {
             recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
             recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -755,7 +752,6 @@ public class InternalEngineTests extends EngineTestCase {
         Engine recoveringEngine = null;
         try {
             final AtomicBoolean committed = new AtomicBoolean();
-            trimUnsafeCommits(initialEngine.config());
             recoveringEngine = new InternalEngine(initialEngine.config()) {
 
                 @Override
@@ -798,7 +794,6 @@ public class InternalEngineTests extends EngineTestCase {
                 }
             }
             initialEngine.close();
-            trimUnsafeCommits(initialEngine.config());
             recoveringEngine = new InternalEngine(initialEngine.config());
             recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
             recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -834,14 +829,12 @@ public class InternalEngineTests extends EngineTestCase {
                 globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint()));
                 engine.syncTranslog();
             }
-            trimUnsafeCommits(config);
             try (InternalEngine engine = new InternalEngine(config)) {
                 engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
                 engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
                 assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo));
                 assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo));
             }
-            trimUnsafeCommits(config);
             try (InternalEngine engine = new InternalEngine(config)) {
                 long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo);
                 engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
@@ -1259,7 +1252,6 @@ public class InternalEngineTests extends EngineTestCase {
                 UNASSIGNED_SEQ_NO, shardId, primaryTerm.get());
             store.associateIndexWithNewTranslog(translogUUID);
         }
-        trimUnsafeCommits(config);
         engine = new InternalEngine(config);
         engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
         engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -1280,7 +1272,6 @@ public class InternalEngineTests extends EngineTestCase {
         engine.index(indexForDoc(doc));
         EngineConfig config = engine.config();
         engine.close();
-        trimUnsafeCommits(config);
         engine = new InternalEngine(config);
         engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
         engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -2379,7 +2370,6 @@ public class InternalEngineTests extends EngineTestCase {
             IOUtils.close(initialEngine);
         }
 
-        trimUnsafeCommits(initialEngine.engineConfig);
         try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())) {
             recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
             recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -2724,7 +2714,6 @@ public class InternalEngineTests extends EngineTestCase {
             // open and recover tlog
             {
                 for (int i = 0; i < 2; i++) {
-                    trimUnsafeCommits(config);
                     try (InternalEngine engine = new InternalEngine(config)) {
                         assertTrue(engine.isRecovering());
                         Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData();
@@ -2751,7 +2740,6 @@ public class InternalEngineTests extends EngineTestCase {
                     Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
                         SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
                 store.associateIndexWithNewTranslog(translogUUID);
-                trimUnsafeCommits(config);
                 try (InternalEngine engine = new InternalEngine(config)) {
                     Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData();
                     assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY));
@@ -2766,7 +2754,6 @@ public class InternalEngineTests extends EngineTestCase {
             // open and recover tlog with empty tlog
             {
                 for (int i = 0; i < 2; i++) {
-                    trimUnsafeCommits(config);
                     try (InternalEngine engine = new InternalEngine(config)) {
                         Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData();
                         assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY));
@@ -2831,7 +2818,6 @@ public class InternalEngineTests extends EngineTestCase {
                 boolean started = false;
                 InternalEngine engine = null;
                 try {
-                    trimUnsafeCommits(config(defaultSettings, store, translogPath, NoMergePolicy.INSTANCE, null));
                     engine = createEngine(store, translogPath);
                     started = true;
                 } catch (EngineException | IOException e) {
@@ -2917,7 +2903,6 @@ public class InternalEngineTests extends EngineTestCase {
         EngineConfig config = engine.config();
         assertVisibleCount(engine, numDocs);
         engine.close();
-        trimUnsafeCommits(config);
         try (InternalEngine engine = new InternalEngine(config)) {
             engine.skipTranslogRecovery();
             try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -2960,7 +2945,6 @@ public class InternalEngineTests extends EngineTestCase {
         translogHandler.mappingUpdate = dynamicUpdate();
 
         engine.close();
-        trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier));
         // we need to reuse the engine config unless the parser.mappingModified won't work
         engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier));
         engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
@@ -4093,7 +4077,6 @@ public class InternalEngineTests extends EngineTestCase {
         } finally {
             IOUtils.close(initialEngine);
         }
-        trimUnsafeCommits(initialEngine.config());
         try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) {
             recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
             recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -4199,7 +4182,6 @@ public class InternalEngineTests extends EngineTestCase {
             final BiFunction<Long, Long, LocalCheckpointTracker> supplier = (ms, lcp) -> new LocalCheckpointTracker(
                     maxSeqNo,
                     localCheckpoint);
-            trimUnsafeCommits(engine.config());
             EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD,
                 () -> new MatchAllDocsQuery(), engine.config().getMergePolicy()));
             noOpEngine = new InternalEngine(noopEngineConfig, supplier) {
@@ -4443,7 +4425,6 @@ public class InternalEngineTests extends EngineTestCase {
                 prevDocs = getDocIds(engine, true);
                 totalTranslogOps = engine.getTranslog().totalOperations();
             }
-            trimUnsafeCommits(engineConfig);
             try (InternalEngine engine = new InternalEngine(engineConfig)) {
                 engine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
                 engine.recoverFromTranslog(translogHandler, globalCheckpoint.get());
@@ -4490,7 +4471,6 @@ public class InternalEngineTests extends EngineTestCase {
             assertEquals(docs - 1, engine.getLocalCheckpoint());
             assertEquals(maxSeqIDOnReplica, replicaEngine.getSeqNoStats(-1).getMaxSeqNo());
             assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpoint());
-            trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get));
             recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get));
             assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations());
             recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
@@ -4525,7 +4505,6 @@ public class InternalEngineTests extends EngineTestCase {
 
         // now do it again to make sure we preserve values etc.
         try {
-            trimUnsafeCommits(replicaEngine.config());
             recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get));
             if (flushed) {
                 assertThat(recoveringEngine.getTranslogStats().getUncommittedOperations(), equalTo(0));
@@ -5401,7 +5380,6 @@ public class InternalEngineTests extends EngineTestCase {
         } else {
             engine.flushAndClose();
         }
-        trimUnsafeCommits(engine.config());
         try (InternalEngine recoveringEngine = new InternalEngine(engine.config())) {
             assertThat(recoveringEngine.getMinRetainedSeqNo(), equalTo(lastMinRetainedSeqNo));
         }
@@ -5556,7 +5534,6 @@ public class InternalEngineTests extends EngineTestCase {
                 engine.syncTranslog();
                 docs = getDocIds(engine, true);
             }
-            trimUnsafeCommits(config);
             Set<Long> seqNosInSafeCommit = null;
             for (int i = commits.size() - 1; i >= 0; i--) {
                 if (commits.get(i).stream().allMatch(op -> op.seqNo() <= globalCheckpoint.get())) {
@@ -5600,7 +5577,6 @@ public class InternalEngineTests extends EngineTestCase {
                 IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(Settings.builder()
                     .put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false)).build());
             EngineConfig config = config(softDeletesDisabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get);
-            trimUnsafeCommits(config);
             try (InternalEngine engine = createEngine(config)) {
                 assertThat(getDocIds(engine, true), equalTo(docs));
             }
@@ -5620,15 +5596,6 @@ public class InternalEngineTests extends EngineTestCase {
         }
     }
 
-    static void trimUnsafeCommits(EngineConfig config) throws IOException {
-        final Store store = config.getStore();
-        final TranslogConfig translogConfig = config.getTranslogConfig();
-        final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
-        final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
-        final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID);
-        store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated());
-    }
-
     void assertLuceneOperations(InternalEngine engine, long expectedAppends, long expectedUpdates, long expectedDeletes) {
         String message = "Lucene operations mismatched;" +
             " appends [actual:" + engine.getNumDocAppends() + ", expected:" + expectedAppends + "]," +
diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java
index e19bdc42b01..e66094d7321 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java
@@ -48,9 +48,6 @@ public class ReadOnlyEngineTests extends EngineTestCase {
             try (InternalEngine engine = createEngine(config)) {
                 Engine.Get get = null;
                 for (int i = 0; i < numDocs; i++) {
-                    if (rarely()) {
-                        continue; // gap in sequence number
-                    }
                     ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
                     engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA,
                         System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
@@ -94,7 +91,6 @@ public class ReadOnlyEngineTests extends EngineTestCase {
 
             }
             // Close and reopen the main engine
-            InternalEngineTests.trimUnsafeCommits(config);
             try (InternalEngine recoveringEngine = new InternalEngine(config)) {
                 recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes();
                 recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -117,17 +113,15 @@ public class ReadOnlyEngineTests extends EngineTestCase {
             int numDocs = scaledRandomIntBetween(10, 1000);
             try (InternalEngine engine = createEngine(config)) {
                 for (int i = 0; i < numDocs; i++) {
-                    if (rarely()) {
-                        continue; // gap in sequence number
-                    }
                     ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
                     engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA,
                         System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
                     if (rarely()) {
                         engine.flush();
                     }
-                    globalCheckpoint.set(i);
+                    globalCheckpoint.set(engine.getLocalCheckpoint());
                 }
+                globalCheckpoint.set(engine.getLocalCheckpoint());
                 engine.syncTranslog();
                 engine.flushAndClose();
                 readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, null , null, true, Function.identity());
@@ -141,7 +135,6 @@ public class ReadOnlyEngineTests extends EngineTestCase {
 
     public void testEnsureMaxSeqNoIsEqualToGlobalCheckpoint() throws IOException {
         IOUtils.close(engine, store);
-        Engine readOnlyEngine = null;
         final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
         try (Store store = createStore()) {
             EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
@@ -159,16 +152,15 @@ public class ReadOnlyEngineTests extends EngineTestCase {
                 engine.flushAndClose();
 
                 IllegalStateException exception = expectThrows(IllegalStateException.class,
-                    () -> new ReadOnlyEngine(engine.engineConfig, null, null, true, Function.identity()) {
+                    () -> new ReadOnlyEngine(config, null, null, true, Function.identity()) {
                         @Override
-                        protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) {
+                        protected boolean assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) {
                             // we don't want the assertion to trip in this test
+                            return true;
                         }
                     });
                 assertThat(exception.getMessage(), equalTo("Maximum sequence number [" + maxSeqNo
                     + "] from last commit does not match global checkpoint [" + globalCheckpoint.get() + "]"));
-            } finally {
-                IOUtils.close(readOnlyEngine);
             }
         }
     }
@@ -219,9 +211,6 @@ public class ReadOnlyEngineTests extends EngineTestCase {
             int numDocs = scaledRandomIntBetween(10, 1000);
             try (InternalEngine engine = createEngine(config)) {
                 for (int i = 0; i < numDocs; i++) {
-                    if (rarely()) {
-                        continue; // gap in sequence number
-                    }
                     ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
                     engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA,
                         System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 042b61c77c2..05a81c6de3c 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -86,6 +86,7 @@ import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.engine.InternalEngine;
 import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.engine.ReadOnlyEngine;
 import org.elasticsearch.index.engine.Segment;
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
@@ -154,6 +155,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.function.LongFunction;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
@@ -3812,4 +3814,40 @@ public class IndexShardTests extends IndexShardTestCase {
             indexShard.acquireAllReplicaOperationsPermits(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, listener, timeout);
         }
     }
+
+    public void testDoNotTrimCommitsWhenOpenReadOnlyEngine() throws Exception {
+        final IndexShard shard = newStartedShard(false, Settings.EMPTY, new InternalEngineFactory());
+        long numDocs = randomLongBetween(1, 20);
+        long seqNo = 0;
+        for (long i = 0; i < numDocs; i++) {
+            if (rarely()) {
+                seqNo++; // create gaps in sequence numbers
+            }
+            shard.applyIndexOperationOnReplica(seqNo, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                new SourceToParse(shard.shardId.getIndexName(), "_doc", Long.toString(i), new BytesArray("{}"), XContentType.JSON));
+            shard.updateGlobalCheckpointOnReplica(shard.getLocalCheckpoint(), "test");
+            if (randomInt(100) < 10) {
+                shard.flush(new FlushRequest());
+            }
+            seqNo++;
+        }
+        shard.flush(new FlushRequest());
+        assertThat(shard.docStats().getCount(), equalTo(numDocs));
+        final ShardRouting replicaRouting = shard.routingEntry();
+        ShardRouting readonlyShardRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), true,
+            ShardRoutingState.INITIALIZING, RecoverySource.ExistingStoreRecoverySource.INSTANCE);
+        final IndexShard readonlyShard = reinitShard(shard, readonlyShardRouting,
+            engineConfig -> new ReadOnlyEngine(engineConfig, null, null, false, Function.identity()) {
+                @Override
+                protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(SeqNoStats seqNoStats) {
+                    // just like a following shard, we need to skip this check for now.
+                }
+            }
+        );
+        DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
+        readonlyShard.markAsRecovering("store", new RecoveryState(readonlyShard.routingEntry(), localNode, null));
+        assertTrue(readonlyShard.recoverFromStore());
+        assertThat(readonlyShard.docStats().getCount(), equalTo(numDocs));
+        closeShards(readonlyShard);
+    }
 }

From 2ee87c99d9eb2a37e601e58a7ac858744dd94a45 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen <nhat.nguyen@elastic.co>
Date: Wed, 17 Apr 2019 10:25:47 -0400
Subject: [PATCH 063/112] Fix bwc version of sanity check of read only engine

Relates #41041
---
 .../java/org/elasticsearch/index/engine/ReadOnlyEngine.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index b5618d5b9cb..777aff88e9d 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -130,7 +130,7 @@ public class ReadOnlyEngine extends Engine {
         // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
         // that guarantee that all operations have been flushed to Lucene.
         final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
-        if (indexVersionCreated.onOrAfter(Version.V_8_0_0) ||
+        if (indexVersionCreated.onOrAfter(Version.V_7_1_0) ||
             (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) {
             if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
                 throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo()

From e090176f17867c8c9a04986ebc1b3516d63d0dc2 Mon Sep 17 00:00:00 2001
From: Iana Bondarska <yana2301@gmail.com>
Date: Wed, 17 Apr 2019 17:01:46 +0200
Subject: [PATCH 064/112] [ML] Exclude analysis fields with core field names
 from anomaly results (#41093)

Added "_index", "_type", "_id" to list of reserved fields.

Closes #39406
---
 .../core/ml/job/results/ReservedFieldNames.java   | 15 ++++++++++++---
 .../persistence/ElasticsearchMappingsTests.java   |  9 +++++++++
 .../ml/job/results/ReservedFieldNamesTests.java   |  6 +++++-
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java
index 333b87b0c29..51bdc5ce594 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.ml.job.results;
 
+import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
 import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig;
@@ -171,8 +172,12 @@ public final class ReservedFieldNames {
 
             Result.RESULT_TYPE.getPreferredName(),
             Result.TIMESTAMP.getPreferredName(),
-            Result.IS_INTERIM.getPreferredName()
-    };
+            Result.IS_INTERIM.getPreferredName(),
+
+            GetResult._ID,
+            GetResult._INDEX,
+            GetResult._TYPE
+    };
 
     /**
      * This array should be updated to contain all the field names that appear
@@ -256,7 +261,11 @@ public final class ReservedFieldNames {
             ChunkingConfig.MODE_FIELD.getPreferredName(),
             ChunkingConfig.TIME_SPAN_FIELD.getPreferredName(),
 
-            ElasticsearchMappings.CONFIG_TYPE
+            ElasticsearchMappings.CONFIG_TYPE,
+
+            GetResult._ID,
+            GetResult._INDEX,
+            GetResult._TYPE
     };
 
     /**
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
index f5461a1abf3..42e328e3591 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
@@ -19,6 +19,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
@@ -63,6 +64,12 @@ public class ElasticsearchMappingsTests extends ESTestCase {
             ElasticsearchMappings.WHITESPACE
     );
 
+    private static List<String> INTERNAL_FIELDS = Arrays.asList(
+            GetResult._ID,
+            GetResult._INDEX,
+            GetResult._TYPE
+    );
+
     public void testResultsMapppingReservedFields() throws Exception {
         Set<String> overridden = new HashSet<>(KEYWORDS);
 
@@ -76,6 +83,7 @@ public class ElasticsearchMappingsTests extends ESTestCase {
 
         Set<String> expected = collectResultsDocFieldNames();
         expected.removeAll(overridden);
+        expected.addAll(INTERNAL_FIELDS);
 
         compareFields(expected, ReservedFieldNames.RESERVED_RESULT_FIELD_NAMES);
     }
@@ -91,6 +99,7 @@ public class ElasticsearchMappingsTests extends ESTestCase {
 
         Set<String> expected = collectConfigDocFieldNames();
         expected.removeAll(overridden);
+        expected.addAll(INTERNAL_FIELDS);
 
         compareFields(expected, ReservedFieldNames.RESERVED_CONFIG_FIELD_NAMES);
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java
index a08b53fba3c..d594404e9c2 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.ml.job.results;
 
+import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord;
 import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames;
@@ -16,5 +17,8 @@ public class ReservedFieldNamesTests extends ESTestCase {
         assertTrue(ReservedFieldNames.isValidFieldName("host.actual"));
         assertFalse(ReservedFieldNames.isValidFieldName("actual.host"));
         assertFalse(ReservedFieldNames.isValidFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName()));
+        assertFalse(ReservedFieldNames.isValidFieldName(GetResult._INDEX));
+        assertFalse(ReservedFieldNames.isValidFieldName(GetResult._TYPE));
+        assertFalse(ReservedFieldNames.isValidFieldName(GetResult._ID));
     }
-}
\ No newline at end of file
+}

From 2670ed2f8f4ce136952fb01f00535fc0f1fc5267 Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Wed, 17 Apr 2019 16:36:04 +0100
Subject: [PATCH 065/112] Assert the stability of custom search preferences
 (#41150)

Today the `?preference=custom_string_value` search preference will only change
its choice of a shard copy if something changes the `IndexShardRoutingTable`
for that specific shard. Users can use this behaviour to route searches to a
consistent set of shard copies, which means they can reliably hit copies with
hot caches, and use the other copies only for redundancy in case of failure.
However we do not assert this property anywhere, so we might break it in
future.

This commit adds a test that shows that searches are routed consistently even
if other indices are created/rebalanced/deleted.
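
For illustration, a minimal sketch of the pattern in the style of the helpers
used by the test below (`client()`, `matchAllQuery()` and the
`assertSearchesSpecificNode` helper added in this change); the index name and
preference string are arbitrary:

    // any stable string works, as long as the same value is sent on every request
    final String customPreference = "my-session-key";

    // remember which node served the first search ...
    final String nodeId = client().prepareSearch("test").setQuery(matchAllQuery())
        .setPreference(customPreference).get().getHits().getAt(0).getShard().getNodeId();

    // ... and expect later searches with the same preference to keep hitting that copy
    assertSearchesSpecificNode("test", customPreference, nodeId);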

Relates https://discuss.elastic.co/t/176598, #41115, #26791
---
 .../search/preference/SearchPreferenceIT.java | 71 +++++++++++++++++--
 1 file changed, 64 insertions(+), 7 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java b/server/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java
index 366975071ce..23c29ce9f46 100644
--- a/server/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java
+++ b/server/src/test/java/org/elasticsearch/search/preference/SearchPreferenceIT.java
@@ -25,10 +25,13 @@ import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.node.Node;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESIntegTestCase;
 
@@ -42,10 +45,10 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasToString;
-import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasToString;
+import static org.hamcrest.Matchers.not;
 
 @ESIntegTestCase.ClusterScope(minNumDataNodes = 2)
 public class SearchPreferenceIT extends ESIntegTestCase {
@@ -57,7 +60,7 @@ public class SearchPreferenceIT extends ESIntegTestCase {
     }
 
     // see #2896
-    public void testStopOneNodePreferenceWithRedState() throws InterruptedException, IOException {
+    public void testStopOneNodePreferenceWithRedState() throws IOException {
         assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", cluster().numDataNodes()+2)
                 .put("index.number_of_replicas", 0)));
         ensureGreen();
@@ -87,7 +90,7 @@ public class SearchPreferenceIT extends ESIntegTestCase {
         assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
     }
 
-    public void testNoPreferenceRandom() throws Exception {
+    public void testNoPreferenceRandom() {
         assertAcked(prepareCreate("test").setSettings(
                 //this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
                 Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))
@@ -106,7 +109,7 @@ public class SearchPreferenceIT extends ESIntegTestCase {
         assertThat(firstNodeId, not(equalTo(secondNodeId)));
     }
 
-    public void testSimplePreference() throws Exception {
+    public void testSimplePreference() {
         client().admin().indices().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", XContentType.JSON).get();
         ensureGreen();
 
@@ -123,7 +126,7 @@ public class SearchPreferenceIT extends ESIntegTestCase {
         assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
     }
 
-    public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception {
+    public void testThatSpecifyingNonExistingNodesReturnsUsefulError() {
         createIndex("test");
         ensureGreen();
 
@@ -135,7 +138,7 @@ public class SearchPreferenceIT extends ESIntegTestCase {
         }
     }
 
-    public void testNodesOnlyRandom() throws Exception {
+    public void testNodesOnlyRandom() {
         assertAcked(prepareCreate("test").setSettings(
             //this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
             Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))));
@@ -193,4 +196,58 @@ public class SearchPreferenceIT extends ESIntegTestCase {
         }
         assertThat(hitNodes.size(), greaterThan(1));
     }
+
+    public void testCustomPreferenceUnaffectedByOtherShardMovements() {
+
+        /*
+         * Custom preferences can be used to encourage searches to go to a consistent set of shard copies, meaning that other copies' data
+         * is rarely touched and can be dropped from the filesystem cache. This works best if the set of shards searched doesn't change
+         * unnecessarily, so this test verifies a consistent routing even as other shards are created/relocated/removed.
+         */
+
+        assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings())
+            .put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))
+            .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
+        ensureGreen();
+        client().prepareIndex("test", "_doc").setSource("field1", "value1").get();
+        refresh();
+
+        final String customPreference = randomAlphaOfLength(10);
+
+        final String nodeId = client().prepareSearch("test").setQuery(matchAllQuery()).setPreference(customPreference)
+            .get().getHits().getAt(0).getShard().getNodeId();
+
+        assertSearchesSpecificNode("test", customPreference, nodeId);
+
+        final int replicasInNewIndex = between(1, maximumNumberOfReplicas());
+        assertAcked(prepareCreate("test2").setSettings(
+            Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex)));
+        ensureGreen();
+
+        assertSearchesSpecificNode("test", customPreference, nodeId);
+
+        assertAcked(client().admin().indices().prepareUpdateSettings("test2").setSettings(Settings.builder()
+            .put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex - 1)));
+
+        assertSearchesSpecificNode("test", customPreference, nodeId);
+
+        assertAcked(client().admin().indices().prepareUpdateSettings("test2").setSettings(Settings.builder()
+            .put(SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name",
+                internalCluster().getDataNodeInstance(Node.class).settings().get(Node.NODE_NAME_SETTING.getKey()))));
+
+        ensureGreen();
+
+        assertSearchesSpecificNode("test", customPreference, nodeId);
+
+        assertAcked(client().admin().indices().prepareDelete("test2"));
+
+        assertSearchesSpecificNode("test", customPreference, nodeId);
+    }
+
+    private static void assertSearchesSpecificNode(String index, String customPreference, String nodeId) {
+        final SearchResponse searchResponse = client().prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference).get();
+        assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+        assertThat(searchResponse.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId));
+    }
 }

From 4d964194db8399fc34ab9e4a99f4538d542753bb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?= <cbuescher@posteo.de>
Date: Wed, 17 Apr 2019 18:44:07 +0200
Subject: [PATCH 066/112] Fix error applying `ignore_malformed` to boolean
 values (#41261)

The `ignore_malformed` option on numeric fields currently works when the bad
value is a malformed string, but not when it is a boolean. In that case we get
a parsing error from the xContent parser, which we need to catch in addition
to the errors thrown by the field mapper.
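
As a rough sketch of the fixed behaviour (mirroring the updated
`NumberFieldMapperTests` below; the `integer` mapping and the document are
illustrative only), a boolean value on a numeric field with
`ignore_malformed: true` is now dropped instead of failing the whole document:

    String mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties")
        .startObject("field").field("type", "integer").field("ignore_malformed", true)
        .endObject().endObject().endObject().endObject());
    DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));

    // the boolean used to escape the mapper's catch block; it is now ignored as malformed
    ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1",
        BytesReference.bytes(jsonBuilder().startObject().field("field", false).endObject()), XContentType.JSON));
    assertEquals(0, doc.rootDoc().getFields("field").length);
    assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored"));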

Closes #11498
---
 .../index/mapper/NumberFieldMapper.java       |  6 +-
 .../index/mapper/NumberFieldMapperTests.java  | 94 ++++++++++++-------
 2 files changed, 63 insertions(+), 37 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
index 06e12ca8b5e..927bce5d9d6 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.index.mapper;
 
+import com.fasterxml.jackson.core.JsonParseException;
+
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FloatPoint;
@@ -1042,8 +1044,8 @@ public class NumberFieldMapper extends FieldMapper {
         } else {
             try {
                 numericValue = fieldType().type.parse(parser, coerce.value());
-            } catch (IllegalArgumentException e) {
-                if (ignoreMalformed.value()) {
+            } catch (IllegalArgumentException | JsonParseException e) {
+                if (ignoreMalformed.value() && parser.currentToken().isValue()) {
                     context.addIgnoredField(fieldType.name());
                     return;
                 } else {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
index b4b9242daa4..77953c0903f 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
@@ -20,11 +20,14 @@
 package org.elasticsearch.index.mapper;
 
 import com.carrotsearch.randomizedtesting.annotations.Timeout;
+
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexableField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType;
@@ -37,6 +40,7 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
 
 public class NumberFieldMapperTests extends AbstractNumericFieldMapperTestCase {
@@ -218,45 +222,65 @@ public class NumberFieldMapperTests extends AbstractNumericFieldMapperTestCase {
 
     public void testIgnoreMalformed() throws Exception {
         for (String type : TYPES) {
-            doTestIgnoreMalformed(type);
+            for (Object malformedValue : new Object[] { "a", Boolean.FALSE }) {
+                String mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties")
+                        .startObject("field").field("type", type).endObject().endObject().endObject().endObject());
+
+                DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
+
+                assertEquals(mapping, mapper.mappingSource().toString());
+
+                ThrowingRunnable runnable = () -> mapper.parse(new SourceToParse("test", "type", "1",
+                        BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()), XContentType.JSON));
+                MapperParsingException e = expectThrows(MapperParsingException.class, runnable);
+                if (malformedValue instanceof String) {
+                    assertThat(e.getCause().getMessage(), containsString("For input string: \"a\""));
+                } else {
+                    assertThat(e.getCause().getMessage(), containsString("Current token"));
+                    assertThat(e.getCause().getMessage(), containsString("not numeric, can not use numeric value accessors"));
+                }
+
+                mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field")
+                        .field("type", type).field("ignore_malformed", true).endObject().endObject().endObject().endObject());
+
+                DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping));
+
+                ParsedDocument doc = mapper2.parse(new SourceToParse("test", "type", "1",
+                        BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()), XContentType.JSON));
+
+                IndexableField[] fields = doc.rootDoc().getFields("field");
+                assertEquals(0, fields.length);
+                assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored"));
+            }
         }
     }
 
-    private void doTestIgnoreMalformed(String type) throws IOException {
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("field").field("type", type).endObject().endObject()
-                .endObject().endObject());
+    /**
+     * Test that in case the malformed value is an xContent object we throw an error regardless of `ignore_malformed`
+     */
+    public void testIgnoreMalformedWithObject() throws Exception {
+        for (String type : TYPES) {
+            Object malformedValue = new ToXContentObject() {
+                @Override
+                public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                    return builder.startObject().field("foo", "bar").endObject();
+                }
+            };
+            for (Boolean ignoreMalformed : new Boolean[] { true, false }) {
+                String mapping = Strings.toString(
+                        jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field").field("type", type)
+                                .field("ignore_malformed", ignoreMalformed).endObject().endObject().endObject().endObject());
+                DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
+                assertEquals(mapping, mapper.mappingSource().toString());
 
-        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
-
-        assertEquals(mapping, mapper.mappingSource().toString());
-
-        ThrowingRunnable runnable = () -> mapper.parse(new SourceToParse("test", "type", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
-                        .startObject()
-                        .field("field", "a")
-                        .endObject()),
-                XContentType.JSON));
-        MapperParsingException e = expectThrows(MapperParsingException.class, runnable);
-
-        assertThat(e.getCause().getMessage(), containsString("For input string: \"a\""));
-
-        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("field").field("type", type).field("ignore_malformed", true).endObject().endObject()
-                .endObject().endObject());
-
-        DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping));
-
-        ParsedDocument doc = mapper2.parse(new SourceToParse("test", "type", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
-                        .startObject()
-                        .field("field", "a")
-                        .endObject()),
-                XContentType.JSON));
-
-        IndexableField[] fields = doc.rootDoc().getFields("field");
-        assertEquals(0, fields.length);
-        assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored"));
+                MapperParsingException e = expectThrows(MapperParsingException.class,
+                        () -> mapper.parse(new SourceToParse("test", "type", "1",
+                                BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()),
+                                XContentType.JSON)));
+                assertThat(e.getCause().getMessage(), containsString("Current token"));
+                assertThat(e.getCause().getMessage(), containsString("not numeric, can not use numeric value accessors"));
+            }
+        }
     }
 
     public void testRejectNorms() throws IOException {

From 7e62ff28237f170a0476ee69478943c563b518c9 Mon Sep 17 00:00:00 2001
From: Zachary Tong <polyfractal@elastic.co>
Date: Wed, 17 Apr 2019 13:33:51 -0400
Subject: [PATCH 067/112] [Rollup] Validate timezones based on rules not string
 comparison (#36237)

The date_histogram internally converts obsolete timezones (such as
"Canada/Mountain") into their modern equivalent ("America/Edmonton").
But rollup just stored the TZ as provided by the user.

When checking the TZ for query validation we used a string comparison,
which would fail due to the date_histo's upgrading behavior.

Instead, we should convert both to a TimeZone object and check if their
rules are compatible.
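
A minimal sketch of that comparison using `java.time` (illustrative only, but
roughly what the validation added here checks):

    ZoneId configured = ZoneId.of("Canada/Mountain");    // as stored in the rollup job config
    ZoneId requested = ZoneId.of("America/Edmonton");    // as normalized by date_histogram

    boolean sameId = configured.getId().equals(requested.getId());          // false
    boolean sameRules = configured.getRules().equals(requested.getRules()); // true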
---
 .../elasticsearch/common/time/DateUtils.java  | 121 +++++++
 .../rollup/job/DateHistogramGroupConfig.java  |  10 +-
 .../RollupDataExtractorFactory.java           |   4 +-
 .../rollup/RollupJobIdentifierUtils.java      |   9 +-
 .../xpack/rollup/RollupRequestTranslator.java |  52 +--
 .../action/TransportPutRollupJobAction.java   |  15 +
 .../action/TransportRollupSearchAction.java   |   4 +-
 .../xpack/rollup/job/RollupIndexer.java       |   9 -
 .../rollup/RollupJobIdentifierUtilTests.java  |  46 +++
 .../rollup/RollupRequestTranslationTests.java | 114 +------
 .../action/PutJobStateMachineTests.java       |  22 ++
 .../rollup/action/SearchActionTests.java      |   4 +-
 .../xpack/rollup/config/ConfigTests.java      |   5 +
 .../xpack/rollup/job/IndexerUtilsTests.java   |  84 +++++
 .../test/rollup/rollup_search.yml             | 304 ++++++++++++++++++
 15 files changed, 645 insertions(+), 158 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java
index 910934a8f50..e6bf6a65105 100644
--- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java
+++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java
@@ -66,6 +66,127 @@ public class DateUtils {
         DEPRECATED_SHORT_TZ_IDS = tzs.keySet();
     }
 
+    // Map of deprecated timezones and their recommended new counterpart
+    public static final Map<String, String> DEPRECATED_LONG_TIMEZONES;
+    static {
+        Map<String, String> tzs = new HashMap<>();
+        tzs.put("Africa/Asmera","Africa/Nairobi");
+        tzs.put("Africa/Timbuktu","Africa/Abidjan");
+        tzs.put("America/Argentina/ComodRivadavia","America/Argentina/Catamarca");
+        tzs.put("America/Atka","America/Adak");
+        tzs.put("America/Buenos_Aires","America/Argentina/Buenos_Aires");
+        tzs.put("America/Catamarca","America/Argentina/Catamarca");
+        tzs.put("America/Coral_Harbour","America/Atikokan");
+        tzs.put("America/Cordoba","America/Argentina/Cordoba");
+        tzs.put("America/Ensenada","America/Tijuana");
+        tzs.put("America/Fort_Wayne","America/Indiana/Indianapolis");
+        tzs.put("America/Indianapolis","America/Indiana/Indianapolis");
+        tzs.put("America/Jujuy","America/Argentina/Jujuy");
+        tzs.put("America/Knox_IN","America/Indiana/Knox");
+        tzs.put("America/Louisville","America/Kentucky/Louisville");
+        tzs.put("America/Mendoza","America/Argentina/Mendoza");
+        tzs.put("America/Montreal","America/Toronto");
+        tzs.put("America/Porto_Acre","America/Rio_Branco");
+        tzs.put("America/Rosario","America/Argentina/Cordoba");
+        tzs.put("America/Santa_Isabel","America/Tijuana");
+        tzs.put("America/Shiprock","America/Denver");
+        tzs.put("America/Virgin","America/Port_of_Spain");
+        tzs.put("Antarctica/South_Pole","Pacific/Auckland");
+        tzs.put("Asia/Ashkhabad","Asia/Ashgabat");
+        tzs.put("Asia/Calcutta","Asia/Kolkata");
+        tzs.put("Asia/Chongqing","Asia/Shanghai");
+        tzs.put("Asia/Chungking","Asia/Shanghai");
+        tzs.put("Asia/Dacca","Asia/Dhaka");
+        tzs.put("Asia/Harbin","Asia/Shanghai");
+        tzs.put("Asia/Kashgar","Asia/Urumqi");
+        tzs.put("Asia/Katmandu","Asia/Kathmandu");
+        tzs.put("Asia/Macao","Asia/Macau");
+        tzs.put("Asia/Rangoon","Asia/Yangon");
+        tzs.put("Asia/Saigon","Asia/Ho_Chi_Minh");
+        tzs.put("Asia/Tel_Aviv","Asia/Jerusalem");
+        tzs.put("Asia/Thimbu","Asia/Thimphu");
+        tzs.put("Asia/Ujung_Pandang","Asia/Makassar");
+        tzs.put("Asia/Ulan_Bator","Asia/Ulaanbaatar");
+        tzs.put("Atlantic/Faeroe","Atlantic/Faroe");
+        tzs.put("Atlantic/Jan_Mayen","Europe/Oslo");
+        tzs.put("Australia/ACT","Australia/Sydney");
+        tzs.put("Australia/Canberra","Australia/Sydney");
+        tzs.put("Australia/LHI","Australia/Lord_Howe");
+        tzs.put("Australia/NSW","Australia/Sydney");
+        tzs.put("Australia/North","Australia/Darwin");
+        tzs.put("Australia/Queensland","Australia/Brisbane");
+        tzs.put("Australia/South","Australia/Adelaide");
+        tzs.put("Australia/Tasmania","Australia/Hobart");
+        tzs.put("Australia/Victoria","Australia/Melbourne");
+        tzs.put("Australia/West","Australia/Perth");
+        tzs.put("Australia/Yancowinna","Australia/Broken_Hill");
+        tzs.put("Brazil/Acre","America/Rio_Branco");
+        tzs.put("Brazil/DeNoronha","America/Noronha");
+        tzs.put("Brazil/East","America/Sao_Paulo");
+        tzs.put("Brazil/West","America/Manaus");
+        tzs.put("Canada/Atlantic","America/Halifax");
+        tzs.put("Canada/Central","America/Winnipeg");
+        tzs.put("Canada/East-Saskatchewan","America/Regina");
+        tzs.put("Canada/Eastern","America/Toronto");
+        tzs.put("Canada/Mountain","America/Edmonton");
+        tzs.put("Canada/Newfoundland","America/St_Johns");
+        tzs.put("Canada/Pacific","America/Vancouver");
+        tzs.put("Canada/Yukon","America/Whitehorse");
+        tzs.put("Chile/Continental","America/Santiago");
+        tzs.put("Chile/EasterIsland","Pacific/Easter");
+        tzs.put("Cuba","America/Havana");
+        tzs.put("Egypt","Africa/Cairo");
+        tzs.put("Eire","Europe/Dublin");
+        tzs.put("Europe/Belfast","Europe/London");
+        tzs.put("Europe/Tiraspol","Europe/Chisinau");
+        tzs.put("GB","Europe/London");
+        tzs.put("GB-Eire","Europe/London");
+        tzs.put("Greenwich","Etc/GMT");
+        tzs.put("Hongkong","Asia/Hong_Kong");
+        tzs.put("Iceland","Atlantic/Reykjavik");
+        tzs.put("Iran","Asia/Tehran");
+        tzs.put("Israel","Asia/Jerusalem");
+        tzs.put("Jamaica","America/Jamaica");
+        tzs.put("Japan","Asia/Tokyo");
+        tzs.put("Kwajalein","Pacific/Kwajalein");
+        tzs.put("Libya","Africa/Tripoli");
+        tzs.put("Mexico/BajaNorte","America/Tijuana");
+        tzs.put("Mexico/BajaSur","America/Mazatlan");
+        tzs.put("Mexico/General","America/Mexico_City");
+        tzs.put("NZ","Pacific/Auckland");
+        tzs.put("NZ-CHAT","Pacific/Chatham");
+        tzs.put("Navajo","America/Denver");
+        tzs.put("PRC","Asia/Shanghai");
+        tzs.put("Pacific/Johnston","Pacific/Honolulu");
+        tzs.put("Pacific/Ponape","Pacific/Pohnpei");
+        tzs.put("Pacific/Samoa","Pacific/Pago_Pago");
+        tzs.put("Pacific/Truk","Pacific/Chuuk");
+        tzs.put("Pacific/Yap","Pacific/Chuuk");
+        tzs.put("Poland","Europe/Warsaw");
+        tzs.put("Portugal","Europe/Lisbon");
+        tzs.put("ROC","Asia/Taipei");
+        tzs.put("ROK","Asia/Seoul");
+        tzs.put("Singapore","Asia/Singapore");
+        tzs.put("Turkey","Europe/Istanbul");
+        tzs.put("UCT","Etc/UCT");
+        tzs.put("US/Alaska","America/Anchorage");
+        tzs.put("US/Aleutian","America/Adak");
+        tzs.put("US/Arizona","America/Phoenix");
+        tzs.put("US/Central","America/Chicago");
+        tzs.put("US/East-Indiana","America/Indiana/Indianapolis");
+        tzs.put("US/Eastern","America/New_York");
+        tzs.put("US/Hawaii","Pacific/Honolulu");
+        tzs.put("US/Indiana-Starke","America/Indiana/Knox");
+        tzs.put("US/Michigan","America/Detroit");
+        tzs.put("US/Mountain","America/Denver");
+        tzs.put("US/Pacific","America/Los_Angeles");
+        tzs.put("US/Samoa","Pacific/Pago_Pago");
+        tzs.put("Universal","Etc/UTC");
+        tzs.put("W-SU","Europe/Moscow");
+        tzs.put("Zulu","Etc/UTC");
+        DEPRECATED_LONG_TIMEZONES = Collections.unmodifiableMap(tzs);
+    }
+
     public static ZoneId dateTimeZoneToZoneId(DateTimeZone timeZone) {
         if (timeZone == null) {
             return null;
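
The new DEPRECATED_LONG_TIMEZONES map is a straight lookup from a deprecated IANA zone ID to its current replacement. As an illustration only (the demo class and resolveTimeZone helper below are not part of the patch), a caller could resolve an incoming ID like this:

    import org.elasticsearch.common.time.DateUtils;

    class DeprecatedTimeZoneLookupDemo {
        // Illustrative helper: resolve a possibly-deprecated zone ID to its modern IANA
        // equivalent, falling back to the original ID when it is not in the deprecation map.
        static String resolveTimeZone(String id) {
            return DateUtils.DEPRECATED_LONG_TIMEZONES.getOrDefault(id, id);
        }

        public static void main(String[] args) {
            System.out.println(resolveTimeZone("Canada/Mountain")); // America/Edmonton
            System.out.println(resolveTimeZone("Europe/Paris"));    // Europe/Paris (not deprecated)
        }
    }
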
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java
index f4fee8acc3d..c9fe0c644a8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java
@@ -24,6 +24,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter
 
 import java.io.IOException;
 import java.time.ZoneId;
+import java.time.ZoneOffset;
 import java.util.Map;
 import java.util.Objects;
 
@@ -52,7 +53,8 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject {
     private static final String FIELD = "field";
     public static final String TIME_ZONE = "time_zone";
     public static final String DELAY = "delay";
-    private static final String DEFAULT_TIMEZONE = "UTC";
+    public static final String DEFAULT_TIMEZONE = "UTC";
+    public static final ZoneId DEFAULT_ZONEID_TIMEZONE = ZoneOffset.UTC;
     private static final ConstructingObjectParser<DateHistogramGroupConfig, Void> PARSER;
     static {
         PARSER = new ConstructingObjectParser<>(NAME, a ->
@@ -210,12 +212,12 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject {
         return Objects.equals(interval, that.interval)
             && Objects.equals(field, that.field)
             && Objects.equals(delay, that.delay)
-            && Objects.equals(timeZone, that.timeZone);
+            && ZoneId.of(timeZone, ZoneId.SHORT_IDS).getRules().equals(ZoneId.of(that.timeZone, ZoneId.SHORT_IDS).getRules());
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(interval, field, delay, timeZone);
+        return Objects.hash(interval, field, delay, ZoneId.of(timeZone, ZoneId.SHORT_IDS).getRules());
     }
 
     @Override
@@ -235,7 +237,7 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject {
         } else {
             rounding = new Rounding.Builder(TimeValue.parseTimeValue(expr, "createRounding"));
         }
-        rounding.timeZone(ZoneId.of(timeZone));
+        rounding.timeZone(ZoneId.of(timeZone, ZoneId.SHORT_IDS));
         return rounding.build();
     }
 }
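
The change above replaces string comparison of the configured time_zone with a comparison of the resolved ZoneRules, so a deprecated tzdata alias and its modern replacement compare equal. A minimal sketch of the java.time semantics the new equals() relies on (the demo class is illustrative only; the two zone IDs are the same pair exercised by the tests later in this patch):

    import java.time.ZoneId;

    class ZoneRulesEqualityDemo {
        public static void main(String[] args) {
            // "Canada/Mountain" is a tzdata alias (Link) for "America/Edmonton": the IDs differ,
            // but the underlying transition rules are identical, so rules-based equality holds.
            ZoneId legacy = ZoneId.of("Canada/Mountain");
            ZoneId modern = ZoneId.of("America/Edmonton");
            System.out.println(legacy.equals(modern));                       // false (ID comparison)
            System.out.println(legacy.getRules().equals(modern.getRules())); // true  (rules comparison)
        }
    }
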
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java
index 4971ad83879..8264d3e15fd 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java
@@ -23,6 +23,8 @@ import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps.RollupFieldCaps;
 import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
 import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;
 
+import java.time.ZoneId;
+import java.time.ZoneOffset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -124,7 +126,7 @@ public class RollupDataExtractorFactory implements DataExtractorFactory {
         if (rollupJobGroupConfig.hasDatehistogram() == false) {
             return false;
         }
-        if ("UTC".equalsIgnoreCase(rollupJobGroupConfig.getTimezone()) == false) {
+        if (ZoneId.of(rollupJobGroupConfig.getTimezone()).getRules().equals(ZoneOffset.UTC.getRules()) == false) {
             return false;
         }
         try {
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java
index 59141d2a83a..95b5069edcf 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java
@@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.rollup.RollupField;
 import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps;
 import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
 
+import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.HashSet;
@@ -96,11 +97,13 @@ public class RollupJobIdentifierUtils {
                     if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
                         DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL));
 
-                        String thisTimezone  = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE);
-                        String sourceTimeZone = source.timeZone() == null ? "UTC" : source.timeZone().toString();
+                        ZoneId thisTimezone = ZoneId.of(((String) agg.get(DateHistogramGroupConfig.TIME_ZONE)), ZoneId.SHORT_IDS);
+                        ZoneId sourceTimeZone = source.timeZone() == null
+                            ? DateHistogramGroupConfig.DEFAULT_ZONEID_TIMEZONE
+                            : ZoneId.of(source.timeZone().toString(), ZoneId.SHORT_IDS);
 
                         // Ensure we are working on the same timezone
-                        if (thisTimezone.equalsIgnoreCase(sourceTimeZone) == false) {
+                        if (thisTimezone.getRules().equals(sourceTimeZone.getRules()) == false) {
                             continue;
                         }
                         if (source.dateHistogramInterval() != null) {
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java
index 45462681198..7cf8f8d1293 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java
@@ -11,8 +11,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
@@ -22,8 +20,8 @@ import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.xpack.core.rollup.RollupField;
 import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
-import org.joda.time.DateTimeZone;
 
+import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -47,7 +45,7 @@ import java.util.function.Supplier;
  * }</pre>
  *
  *
- * The only publicly "consumable" API is {@link #translateAggregation(AggregationBuilder, List, NamedWriteableRegistry)}.
+ * The only publicly "consumable" API is {@link #translateAggregation(AggregationBuilder, NamedWriteableRegistry)}.
  */
 public class RollupRequestTranslator {
 
@@ -116,26 +114,22 @@ public class RollupRequestTranslator {
      * relevant method below.
      *
      * @param source           The source aggregation to translate into rollup-enabled version
-     * @param filterConditions A list used to track any filter conditions that sub-aggs may
-     *                         require.
      * @param registry  Registry containing the various aggregations so that we can easily
      *                  deserialize into a stream for cloning
      * @return  Returns the fully translated aggregation tree. Note that it returns a list instead
      *          of a single AggBuilder, since some aggregations (e.g. avg) may result in two
      *          translated aggs (sum + count)
      */
-    public static List<AggregationBuilder> translateAggregation(AggregationBuilder source,
-                                                                List<QueryBuilder> filterConditions,
-                                                                NamedWriteableRegistry registry) {
+    public static List<AggregationBuilder> translateAggregation(AggregationBuilder source, NamedWriteableRegistry registry) {
 
         if (source.getWriteableName().equals(DateHistogramAggregationBuilder.NAME)) {
-            return translateDateHistogram((DateHistogramAggregationBuilder) source, filterConditions, registry);
+            return translateDateHistogram((DateHistogramAggregationBuilder) source, registry);
         } else if (source.getWriteableName().equals(HistogramAggregationBuilder.NAME)) {
-            return translateHistogram((HistogramAggregationBuilder) source, filterConditions, registry);
+            return translateHistogram((HistogramAggregationBuilder) source, registry);
         } else if (RollupField.SUPPORTED_METRICS.contains(source.getWriteableName())) {
             return translateVSLeaf((ValuesSourceAggregationBuilder.LeafOnly)source, registry);
         } else if (source.getWriteableName().equals(TermsAggregationBuilder.NAME)) {
-            return translateTerms((TermsAggregationBuilder)source, filterConditions, registry);
+            return translateTerms((TermsAggregationBuilder)source, registry);
         } else {
             throw new IllegalArgumentException("Unable to translate aggregation tree into Rollup.  Aggregation ["
                     + source.getName() + "] is of type [" + source.getClass().getSimpleName() + "] which is " +
@@ -195,22 +189,13 @@ public class RollupRequestTranslator {
      *             <li>Field: `{timestamp field}.date_histogram._count`</li>
      *         </ul>
      *     </li>
-     *     <li>Add a filter condition:</li>
-     *     <li>
-     *         <ul>
-     *             <li>Query type: TermQuery</li>
-     *             <li>Field: `{timestamp_field}.date_histogram.interval`</li>
-     *             <li>Value: `{source interval}`</li>
-     *         </ul>
-     *     </li>
      * </ul>
      *
      */
     private static List<AggregationBuilder> translateDateHistogram(DateHistogramAggregationBuilder source,
-                                                                   List<QueryBuilder> filterConditions,
                                                                    NamedWriteableRegistry registry) {
 
-        return translateVSAggBuilder(source, filterConditions, registry, () -> {
+        return translateVSAggBuilder(source, registry, () -> {
             DateHistogramAggregationBuilder rolledDateHisto
                     = new DateHistogramAggregationBuilder(source.getName());
 
@@ -220,13 +205,9 @@ public class RollupRequestTranslator {
                 rolledDateHisto.interval(source.interval());
             }
 
-            String timezone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString();
-            filterConditions.add(new TermQueryBuilder(RollupField.formatFieldName(source,
-                DateHistogramGroupConfig.TIME_ZONE), timezone));
+            ZoneId timeZone = source.timeZone() == null ? DateHistogramGroupConfig.DEFAULT_ZONEID_TIMEZONE : source.timeZone();
+            rolledDateHisto.timeZone(timeZone);
 
-            if (source.timeZone() != null) {
-                rolledDateHisto.timeZone(source.timeZone());
-            }
             rolledDateHisto.offset(source.offset());
             if (source.extendedBounds() != null) {
                 rolledDateHisto.extendedBounds(source.extendedBounds());
@@ -248,14 +229,13 @@ public class RollupRequestTranslator {
      * Notably, it adds a Sum metric to calculate the doc_count in each bucket.
      *
      * Conventions are identical to a date_histogram (excepting date-specific details), so see
-     * {@link #translateDateHistogram(DateHistogramAggregationBuilder, List, NamedWriteableRegistry)} for
+     * {@link #translateDateHistogram(DateHistogramAggregationBuilder, NamedWriteableRegistry)} for
      * a complete list of conventions, examples, etc
      */
     private static List<AggregationBuilder> translateHistogram(HistogramAggregationBuilder source,
-                                                               List<QueryBuilder> filterConditions,
                                                                NamedWriteableRegistry registry) {
 
-        return translateVSAggBuilder(source, filterConditions, registry, () -> {
+        return translateVSAggBuilder(source, registry, () -> {
             HistogramAggregationBuilder rolledHisto
                     = new HistogramAggregationBuilder(source.getName());
 
@@ -328,10 +308,9 @@ public class RollupRequestTranslator {
      *
      */
     private static List<AggregationBuilder> translateTerms(TermsAggregationBuilder source,
-                                                           List<QueryBuilder> filterConditions,
                                                            NamedWriteableRegistry registry) {
 
-        return translateVSAggBuilder(source, filterConditions, registry, () -> {
+        return translateVSAggBuilder(source, registry, () -> {
             TermsAggregationBuilder rolledTerms
                     = new TermsAggregationBuilder(source.getName(), source.valueType());
             rolledTerms.field(RollupField.formatFieldName(source, RollupField.VALUE));
@@ -359,8 +338,6 @@ public class RollupRequestTranslator {
      * ValueSourceBuilder.  This method is called by all the agg-specific methods (e.g. translateDateHistogram())
      *
      * @param source The source aggregation that we wish to translate
-     * @param filterConditions A list of existing filter conditions, in case we need to add some
-     *                         for this particular agg
      * @param registry Named registry for serializing leaf metrics.  Not actually used by this method,
      *                 but is passed downwards for leaf usage
      * @param factory A factory closure that generates a new shallow clone of the `source`. E.g. if `source` is
@@ -371,15 +348,14 @@ public class RollupRequestTranslator {
      * @return the translated multi-bucket ValueSourceAggBuilder
      */
     private static <T extends ValuesSourceAggregationBuilder> List<AggregationBuilder>
-        translateVSAggBuilder(ValuesSourceAggregationBuilder source, List<QueryBuilder> filterConditions,
-                          NamedWriteableRegistry registry, Supplier<T> factory) {
+        translateVSAggBuilder(ValuesSourceAggregationBuilder source, NamedWriteableRegistry registry, Supplier<T> factory) {
 
         T rolled = factory.get();
 
         // Translate all subaggs and add to the newly translated agg
         // NOTE: using for loop instead of stream because compiler explodes with a bug :/
         for (AggregationBuilder subAgg : source.getSubAggregations()) {
-            List<AggregationBuilder> translated = translateAggregation(subAgg, filterConditions, registry);
+            List<AggregationBuilder> translated = translateAggregation(subAgg, registry);
             for (AggregationBuilder t : translated) {
                 rolled.subAggregation(t);
             }
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
index cb04f5554b4..db6c1c5ddea 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.rollup.action;
 
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
@@ -32,6 +33,8 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.time.DateUtils;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.license.LicenseUtils;
@@ -57,6 +60,8 @@ public class TransportPutRollupJobAction extends TransportMasterNodeAction<PutRo
     private final XPackLicenseState licenseState;
     private final PersistentTasksService persistentTasksService;
     private final Client client;
+    private static final DeprecationLogger deprecationLogger
+        = new DeprecationLogger(LogManager.getLogger(TransportPutRollupJobAction.class));
 
     @Inject
     public TransportPutRollupJobAction(TransportService transportService, ThreadPool threadPool,
@@ -90,6 +95,7 @@ public class TransportPutRollupJobAction extends TransportMasterNodeAction<PutRo
         }
 
         XPackPlugin.checkReadyForXPackCustomMetadata(clusterState);
+        checkForDeprecatedTZ(request);
 
         FieldCapabilitiesRequest fieldCapsRequest = new FieldCapabilitiesRequest()
             .indices(request.getConfig().getIndexPattern())
@@ -115,6 +121,15 @@ public class TransportPutRollupJobAction extends TransportMasterNodeAction<PutRo
         });
     }
 
+    static void checkForDeprecatedTZ(PutRollupJobAction.Request request) {
+        String timeZone = request.getConfig().getGroupConfig().getDateHistogram().getTimeZone();
+        String modernTZ = DateUtils.DEPRECATED_LONG_TIMEZONES.get(timeZone);
+        if (modernTZ != null) {
+            deprecationLogger.deprecated("Creating Rollup job [" + request.getConfig().getId() + "] with timezone ["
+                + timeZone + "], but [" + timeZone + "] has been deprecated by the IANA.  Use [" + modernTZ + "] instead.");
+        }
+    }
+
     private static RollupJob createRollupJob(RollupJobConfig config, ThreadPool threadPool) {
         // ensure we only filter for the allowed headers
         Map<String, String> filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream()
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
index e85a92c0613..414a0d08ef3 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
@@ -173,10 +173,12 @@ public class TransportRollupSearchAction extends TransportAction<SearchRequest,
 
         for (AggregationBuilder agg : sourceAgg.getAggregatorFactories()) {
 
+            // TODO: this bool filter is now redundant; we already filter on the job ID in the query
+            // and the translator no longer adds any filter conditions of its own
             List<QueryBuilder> filterConditions = new ArrayList<>(5);
 
             // Translate the agg tree, and collect any potential filtering clauses
-            List<AggregationBuilder> translatedAgg = RollupRequestTranslator.translateAggregation(agg, filterConditions, registry);
+            List<AggregationBuilder> translatedAgg = RollupRequestTranslator.translateAggregation(agg, registry);
 
             BoolQueryBuilder boolQuery = new BoolQueryBuilder();
             filterConditions.forEach(boolQuery::must);
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java
index e051e912c48..daa888562e9 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java
@@ -41,7 +41,6 @@ import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats;
 import org.elasticsearch.xpack.core.rollup.job.RollupJob;
 import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
 import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
-import org.joda.time.DateTimeZone;
 
 import java.time.ZoneId;
 import java.util.ArrayList;
@@ -311,13 +310,5 @@ public abstract class RollupIndexer extends AsyncTwoPhaseIndexer<Map<String, Obj
         }
         return Collections.unmodifiableList(builders);
     }
-
-    private static DateTimeZone toDateTimeZone(final String timezone) {
-        try {
-            return DateTimeZone.forOffsetHours(Integer.parseInt(timezone));
-        } catch (NumberFormatException e) {
-            return DateTimeZone.forID(timezone);
-        }
-    }
 }
 
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java
index d05a78e1212..614cbba72b6 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java
@@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
 import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
 import org.joda.time.DateTimeZone;
 
+import java.time.ZoneId;
 import java.time.ZoneOffset;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -662,6 +663,51 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         }
     }
 
+    public void testObsoleteTimezone() {
+        // Job has "obsolete" timezone
+        DateHistogramGroupConfig dateHisto = new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "Canada/Mountain");
+        GroupConfig group = new GroupConfig(dateHisto);
+        RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10,  group, emptyList(), null);
+        RollupJobCaps cap = new RollupJobCaps(job);
+        Set<RollupJobCaps> caps = singletonSet(cap);
+
+        DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo")
+            .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())
+            .timeZone(ZoneId.of("Canada/Mountain"));
+
+        Set<RollupJobCaps> bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
+        assertThat(bestCaps.size(), equalTo(1));
+
+        builder = new DateHistogramAggregationBuilder("foo").field("foo")
+            .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())
+            .timeZone(ZoneId.of("America/Edmonton"));
+
+        bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
+        assertThat(bestCaps.size(), equalTo(1));
+
+        // now the reverse, job has "new" timezone
+
+        dateHisto = new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "America/Edmonton");
+        group = new GroupConfig(dateHisto);
+        job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10,  group, emptyList(), null);
+        cap = new RollupJobCaps(job);
+        caps = singletonSet(cap);
+
+        builder = new DateHistogramAggregationBuilder("foo").field("foo")
+            .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())
+            .timeZone(ZoneId.of("Canada/Mountain"));
+
+        bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
+        assertThat(bestCaps.size(), equalTo(1));
+
+        builder = new DateHistogramAggregationBuilder("foo").field("foo")
+            .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())
+            .timeZone(ZoneId.of("America/Edmonton"));
+
+        bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps);
+        assertThat(bestCaps.size(), equalTo(1));
+    }
+
     private static long getMillis(RollupJobCaps cap) {
         for (RollupJobCaps.RollupFieldCaps fieldCaps : cap.getFieldCaps().values()) {
             for (Map<String, Object> agg : fieldCaps.getAggs()) {
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java
index f691d10d20d..db58115489d 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java
@@ -9,8 +9,6 @@ package org.elasticsearch.xpack.rollup;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
@@ -33,7 +31,6 @@ import org.junit.Before;
 import java.io.IOException;
 import java.time.ZoneId;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.function.Function;
@@ -65,9 +62,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
                 .extendedBounds(new ExtendedBounds(0L, 1000L))
                 .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
                 .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(histo, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), Matchers.instanceOf(DateHistogramAggregationBuilder.class));
         DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@@ -93,22 +89,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(subAggs.get("test_histo._count"), Matchers.instanceOf(SumAggregationBuilder.class));
         assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
                 equalTo("foo.date_histogram._count"));
-
-        assertThat(filterConditions.size(), equalTo(1));
-        for (QueryBuilder q : filterConditions) {
-            if (q instanceof TermQueryBuilder) {
-                switch (((TermQueryBuilder) q).fieldName()) {
-                    case "foo.date_histogram.time_zone":
-                        assertThat(((TermQueryBuilder) q).value(), equalTo("UTC"));
-                        break;
-                    default:
-                        fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
-                        break;
-                }
-            } else {
-                fail("Unexpected query builder in filter conditions");
-            }
-        }
     }
 
     public void testFormattedDateHisto() {
@@ -118,9 +98,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
             .extendedBounds(new ExtendedBounds(0L, 1000L))
             .format("yyyy-MM-dd")
             .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(histo, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), Matchers.instanceOf(DateHistogramAggregationBuilder.class));
         DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@@ -133,7 +112,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
     public void testSimpleMetric() {
         int i = ESTestCase.randomIntBetween(0, 2);
         List<AggregationBuilder> translated = new ArrayList<>();
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
         Class clazz = null;
         String fieldName = null;
@@ -141,17 +119,17 @@ public class RollupRequestTranslationTests extends ESTestCase {
 
         if (i == 0) {
             translated = translateAggregation(new MaxAggregationBuilder("test_metric")
-                    .field("foo"), filterConditions, namedWriteableRegistry);
+                    .field("foo"), namedWriteableRegistry);
             clazz = MaxAggregationBuilder.class;
             fieldName =  "foo.max.value";
         } else if (i == 1) {
             translated = translateAggregation(new MinAggregationBuilder("test_metric")
-                    .field("foo"), filterConditions, namedWriteableRegistry);
+                    .field("foo"), namedWriteableRegistry);
             clazz = MinAggregationBuilder.class;
             fieldName =  "foo.min.value";
         } else if (i == 2) {
             translated = translateAggregation(new SumAggregationBuilder("test_metric")
-                    .field("foo"), filterConditions, namedWriteableRegistry);
+                    .field("foo"), namedWriteableRegistry);
             clazz = SumAggregationBuilder.class;
             fieldName =  "foo.sum.value";
         }
@@ -160,14 +138,12 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(translated.get(0), Matchers.instanceOf(clazz));
         assertThat((translated.get(0)).getName(), equalTo("test_metric"));
         assertThat(((ValuesSourceAggregationBuilder)translated.get(0)).field(), equalTo(fieldName));
-
-        assertThat(filterConditions.size(), equalTo(0));
     }
 
     public void testUnsupportedMetric() {
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                 () -> translateAggregation(new StatsAggregationBuilder("test_metric")
-                        .field("foo"), Collections.emptyList(), namedWriteableRegistry));
+                        .field("foo"), namedWriteableRegistry));
         assertThat(e.getMessage(), equalTo("Unable to translate aggregation tree into Rollup.  Aggregation [test_metric] is of type " +
                 "[StatsAggregationBuilder] which is currently unsupported."));
     }
@@ -178,9 +154,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
                 .field("foo")
                 .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
                 .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(histo, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class));
         DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@@ -206,20 +181,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(subAggs.get("test_histo._count"), instanceOf(SumAggregationBuilder.class));
         assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
                 equalTo("foo.date_histogram._count"));
-
-        assertThat(filterConditions.size(), equalTo(1));
-
-        for (QueryBuilder q : filterConditions) {
-            if (q instanceof TermQueryBuilder) {
-               if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
-                    assertThat(((TermQueryBuilder) q).value(), equalTo("UTC"));
-                } else {
-                    fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
-                }
-            } else {
-                fail("Unexpected query builder in filter conditions");
-            }
-        }
     }
 
     public void testDateHistoLongIntervalWithMinMax() {
@@ -228,9 +189,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
                 .field("foo")
                 .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
                 .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(histo, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class));
         DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@@ -256,20 +216,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(subAggs.get("test_histo._count"), instanceOf(SumAggregationBuilder.class));
         assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
                 equalTo("foo.date_histogram._count"));
-
-        assertThat(filterConditions.size(), equalTo(1));
-
-        for (QueryBuilder q : filterConditions) {
-            if (q instanceof TermQueryBuilder) {
-                if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
-                    assertThat(((TermQueryBuilder) q).value(), equalTo("UTC"));
-                }  else {
-                    fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
-                }
-            } else {
-                fail("Unexpected query builder in filter conditions");
-            }
-        }
     }
 
     public void testDateHistoWithTimezone() {
@@ -278,9 +224,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
         histo.interval(86400000)
             .field("foo")
             .timeZone(timeZone);
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(histo, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class));
         DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0);
@@ -288,25 +233,11 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(translatedHisto.interval(), equalTo(86400000L));
         assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp"));
         assertThat(translatedHisto.timeZone(), equalTo(timeZone));
-        assertThat(filterConditions.size(), equalTo(1));
-
-        for (QueryBuilder q : filterConditions) {
-            if (q instanceof TermQueryBuilder) {
-                if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) {
-                    assertThat(((TermQueryBuilder) q).value(), equalTo(timeZone.toString()));
-                }  else {
-                    fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
-                }
-            } else {
-                fail("Unexpected query builder in filter conditions");
-            }
-        }
     }
 
     public void testAvgMetric() {
-        List<QueryBuilder> filterConditions = new ArrayList<>();
         List<AggregationBuilder> translated = translateAggregation(new AvgAggregationBuilder("test_metric")
-                .field("foo"), filterConditions, namedWriteableRegistry);
+                .field("foo"), namedWriteableRegistry);
 
         assertThat(translated.size(), equalTo(2));
         Map<String, AggregationBuilder> metrics = translated.stream()
@@ -319,8 +250,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(metrics.get("test_metric._count"), Matchers.instanceOf(SumAggregationBuilder.class));
         assertThat(((SumAggregationBuilder)metrics.get("test_metric._count")).field(),
                 equalTo("foo.avg._count"));
-
-        assertThat(filterConditions.size(), equalTo(0));
     }
 
     public void testStringTerms() throws IOException {
@@ -329,9 +258,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
         terms.field("foo")
                 .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
                 .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(terms, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(terms, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), Matchers.instanceOf(TermsAggregationBuilder.class));
         TermsAggregationBuilder translatedHisto = (TermsAggregationBuilder)translated.get(0);
@@ -356,8 +284,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(subAggs.get("test_string_terms._count"), Matchers.instanceOf(SumAggregationBuilder.class));
         assertThat(((SumAggregationBuilder)subAggs.get("test_string_terms._count")).field(),
                 equalTo("foo.terms._count"));
-
-        assertThat(filterConditions.size(), equalTo(0));
     }
 
     public void testBasicHisto() {
@@ -368,9 +294,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
                 .extendedBounds(0.0, 1000.0)
                 .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
                 .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
-        List<AggregationBuilder> translated = translateAggregation(histo, filterConditions, namedWriteableRegistry);
+        List<AggregationBuilder> translated = translateAggregation(histo, namedWriteableRegistry);
         assertThat(translated.size(), equalTo(1));
         assertThat(translated.get(0), Matchers.instanceOf(HistogramAggregationBuilder.class));
         HistogramAggregationBuilder translatedHisto = (HistogramAggregationBuilder)translated.get(0);
@@ -396,18 +321,6 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(),
                 equalTo("foo.histogram._count"));
 
-        assertThat(filterConditions.size(), equalTo(0));
-        for (QueryBuilder q : filterConditions) {
-            if (q instanceof TermQueryBuilder) {
-                switch (((TermQueryBuilder) q).fieldName()) {
-                    default:
-                        fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]");
-                        break;
-                }
-            } else {
-                fail("Unexpected query builder in filter conditions");
-            }
-        }
     }
 
     public void testUnsupportedAgg() {
@@ -415,10 +328,9 @@ public class RollupRequestTranslationTests extends ESTestCase {
         geo.field("foo")
                 .subAggregation(new MaxAggregationBuilder("the_max").field("max_field"))
                 .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field"));
-        List<QueryBuilder> filterConditions = new ArrayList<>();
 
         Exception e = expectThrows(RuntimeException.class,
-                () -> translateAggregation(geo, filterConditions, namedWriteableRegistry));
+                () -> translateAggregation(geo, namedWriteableRegistry));
         assertThat(e.getMessage(), equalTo("Unable to translate aggregation tree into Rollup.  Aggregation [test_geo] is of type " +
                 "[GeoDistanceAggregationBuilder] which is currently unsupported."));
     }
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java
index 3d346456ea9..3f49609953e 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java
@@ -23,9 +23,13 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
 import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
 import org.elasticsearch.xpack.core.rollup.RollupField;
+import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction;
+import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
+import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
 import org.elasticsearch.xpack.core.rollup.job.RollupJob;
 import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
 import org.elasticsearch.xpack.rollup.Rollup;
@@ -424,4 +428,22 @@ public class PutJobStateMachineTests extends ESTestCase {
         verify(tasksService).sendStartRequest(eq(job.getConfig().getId()), eq(RollupField.TASK_NAME), eq(job), any());
         verify(tasksService).waitForPersistentTaskCondition(eq(job.getConfig().getId()), any(), any(), any());
     }
+
+    public void testDeprecatedTimeZone() {
+        GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "Japan"));
+        RollupJobConfig config = new RollupJobConfig("foo", randomAlphaOfLength(5), "rollup", ConfigTestHelpers.randomCron(),
+            100, groupConfig, Collections.emptyList(), null);
+        PutRollupJobAction.Request request = new PutRollupJobAction.Request(config);
+        TransportPutRollupJobAction.checkForDeprecatedTZ(request);
+        assertWarnings("Creating Rollup job [foo] with timezone [Japan], but [Japan] has been deprecated by the IANA.  " +
+            "Use [Asia/Tokyo] instead.");
+    }
+
+    public void testTimeZone() {
+        GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST"));
+        RollupJobConfig config = new RollupJobConfig("foo", randomAlphaOfLength(5), "rollup", ConfigTestHelpers.randomCron(),
+            100, groupConfig, Collections.emptyList(), null);
+        PutRollupJobAction.Request request = new PutRollupJobAction.Request(config);
+        TransportPutRollupJobAction.checkForDeprecatedTZ(request);
+    }
 }
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
index 5a851d17e5e..a795edca83e 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
@@ -118,7 +118,7 @@ public class SearchActionTests extends ESTestCase {
         assertThat(e.getMessage(), equalTo("Unsupported Query in search request: [match_phrase]"));
     }
 
-    public void testRange() {
+    public void testRangeTimezoneUTC() {
         final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")));
         final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10,  groupConfig, emptyList(), null);
         RollupJobCaps cap = new RollupJobCaps(config);
@@ -127,6 +127,7 @@ public class SearchActionTests extends ESTestCase {
         QueryBuilder rewritten = TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("UTC"), caps);
         assertThat(rewritten, instanceOf(RangeQueryBuilder.class));
         assertThat(((RangeQueryBuilder)rewritten).fieldName(), equalTo("foo.date_histogram.timestamp"));
+        assertThat(((RangeQueryBuilder)rewritten).timeZone(), equalTo("UTC"));
     }
 
     public void testRangeNullTimeZone() {
@@ -138,6 +139,7 @@ public class SearchActionTests extends ESTestCase {
         QueryBuilder rewritten = TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1), caps);
         assertThat(rewritten, instanceOf(RangeQueryBuilder.class));
         assertThat(((RangeQueryBuilder)rewritten).fieldName(), equalTo("foo.date_histogram.timestamp"));
+        assertNull(((RangeQueryBuilder)rewritten).timeZone());
     }
 
     public void testRangeDifferentTZ() {
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java
index 9f8796f4c95..2f0612a65d2 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java
@@ -90,6 +90,11 @@ public class ConfigTests extends ESTestCase {
         assertThat(e.getMessage(), equalTo("Unknown time-zone ID: FOO"));
     }
 
+    public void testObsoleteTimeZone() {
+        DateHistogramGroupConfig config = new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, "Canada/Mountain");
+        assertThat(config.getTimeZone(), equalTo("Canada/Mountain"));
+    }
+
     public void testEmptyHistoField() {
         Exception e = expectThrows(IllegalArgumentException.class, () -> new HistogramGroupConfig(1L, (String[]) null));
         assertThat(e.getMessage(), equalTo("Fields must have at least one value"));
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
index cbf85e84b16..38b90328a87 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
@@ -47,6 +47,7 @@ import org.joda.time.DateTime;
 import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
+import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -561,6 +562,89 @@ public class IndexerUtilsTests extends AggregatorTestCase {
         }
     }
 
+    public void testTimezone() throws IOException {
+        String indexName = randomAlphaOfLengthBetween(1, 10);
+        RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+        String timestampField = "the_histo";
+        String valueField = "the_avg";
+
+        Directory directory = newDirectory();
+        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+
+        {
+            Document document = new Document();
+            long timestamp = 1443659400000L; // 2015-10-01T00:30:00Z
+            document.add(new SortedNumericDocValuesField(timestampField, timestamp));
+            document.add(new LongPoint(timestampField, timestamp));
+            document.add(new SortedNumericDocValuesField(valueField, randomIntBetween(1, 100)));
+            indexWriter.addDocument(document);
+        }
+        {
+            Document document = new Document();
+            long timestamp = 1443663000000L; // 2015-10-01T01:30:00Z
+            document.add(new SortedNumericDocValuesField(timestampField, timestamp));
+            document.add(new LongPoint(timestampField, timestamp));
+            document.add(new SortedNumericDocValuesField(valueField, randomIntBetween(1, 100)));
+            indexWriter.addDocument(document);
+        }
+        indexWriter.close();
+
+        IndexReader indexReader = DirectoryReader.open(directory);
+        IndexSearcher indexSearcher = newIndexSearcher(indexReader);
+
+        DateFieldMapper.Builder builder = new DateFieldMapper.Builder(timestampField);
+        DateFieldMapper.DateFieldType timestampFieldType = builder.fieldType();
+        timestampFieldType.setHasDocValues(true);
+        timestampFieldType.setName(timestampField);
+
+        MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        valueFieldType.setName(valueField);
+        valueFieldType.setHasDocValues(true);
+        valueFieldType.setName(valueField);
+
+        // Setup the composite agg
+        DateHistogramValuesSourceBuilder dateHisto
+            = new DateHistogramValuesSourceBuilder("the_histo." + DateHistogramAggregationBuilder.NAME)
+            .field(timestampField)
+            .dateHistogramInterval(new DateHistogramInterval("1d"))
+            .timeZone(ZoneId.of("-01:00", ZoneId.SHORT_IDS));  // adds a timezone so that we aren't on default UTC
+
+        CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME,
+            singletonList(dateHisto));
+
+        MetricConfig metricConfig = new MetricConfig(valueField, singletonList("max"));
+        List<AggregationBuilder> metricAgg = createAggregationBuilders(singletonList(metricConfig));
+        metricAgg.forEach(compositeBuilder::subAggregation);
+
+        Aggregator aggregator = createAggregator(compositeBuilder, indexSearcher, timestampFieldType, valueFieldType);
+        aggregator.preCollection();
+        indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+        aggregator.postCollection();
+        CompositeAggregation composite = (CompositeAggregation) aggregator.buildAggregation(0L);
+        indexReader.close();
+        directory.close();
+
+        final GroupConfig groupConfig = randomGroupConfig(random());
+        List<IndexRequest> docs = IndexerUtils.processBuckets(composite, indexName, stats, groupConfig, "foo", randomBoolean());
+
+        assertThat(docs.size(), equalTo(2));
+
+        Map<String, Object> map = docs.get(0).sourceAsMap();
+        assertNotNull(map.get(valueField + "." + MaxAggregationBuilder.NAME + "." + RollupField.VALUE));
+        assertThat(map.get("the_histo." + DateHistogramAggregationBuilder.NAME + "." + RollupField.COUNT_FIELD), equalTo(1));
+        assertThat(map.get("the_histo." + DateHistogramAggregationBuilder.NAME + "." + RollupField.TIMESTAMP),
+            equalTo(1443574800000L)); // 2015-09-30T00:00:00.000-01:00
+
+        map = docs.get(1).sourceAsMap();
+        assertNotNull(map.get(valueField + "." + MaxAggregationBuilder.NAME + "." + RollupField.VALUE));
+        assertThat(map.get("the_histo." + DateHistogramAggregationBuilder.NAME + "." + RollupField.COUNT_FIELD), equalTo(1));
+        assertThat(map.get("the_histo." + DateHistogramAggregationBuilder.NAME + "." + RollupField.TIMESTAMP),
+            equalTo(1443661200000L)); // 2015-10-01T00:00:00.000-01:00
+
+
+    }
+
     interface Mock {
         List<? extends CompositeAggregation.Bucket> getBuckets();
     }
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml
index a7765dfc15f..be9c9f4a41e 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml
@@ -881,6 +881,308 @@ setup:
                  interval: "1h"
                  time_zone: "UTC"
 
+---
+"Obsolete Timezone":
+  - skip:
+        version: " - 7.0.99"
+        reason: "IANA TZ deprecations in 7.1"
+        features: "warnings"
+  - do:
+      indices.create:
+        index: tz
+        body:
+          mappings:
+            properties:
+              timestamp:
+                type: date
+              partition:
+                type: keyword
+              price:
+                type: integer
+  - do:
+      headers:
+        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
+      warnings:
+        - "Creating Rollup job [tz] with timezone [Canada/Mountain], but [Canada/Mountain] has been deprecated by the IANA.  Use [America/Edmonton] instead."
+      rollup.put_job:
+        id: tz
+        body:  >
+          {
+            "index_pattern": "tz",
+            "rollup_index": "tz_rollup",
+            "cron": "*/30 * * * * ?",
+            "page_size" :10,
+            "groups" : {
+              "date_histogram": {
+                "field": "timestamp",
+                "interval": "5m",
+                "time_zone": "Canada/Mountain"
+              },
+              "terms": {
+                "fields": ["partition"]
+              }
+            },
+            "metrics": [
+              {
+                "field": "price",
+                "metrics": ["max"]
+              }
+            ]
+          }
+
+  - do:
+      headers:
+        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: "tz_rollup"
+              _type: "_doc"
+          - timestamp.date_histogram.timestamp: 1531221000000
+            timestamp.date_histogram.interval: "5m"
+            timestamp.date_histogram.time_zone: "America/Edmonton"
+            timestamp.date_histogram._count: 1
+            partition.terms.value: "a"
+            partition.terms._count: 1
+            price.max.value: 1
+            "_rollup.id": "tz"
+            "_rollup.version": 2
+
+          - index:
+              _index: "tz_rollup"
+              _type: "_doc"
+          - timestamp.date_histogram.timestamp: 1531221300000
+            timestamp.date_histogram.interval: "5m"
+            timestamp.date_histogram.time_zone: "America/Edmonton"
+            timestamp.date_histogram._count: 2
+            partition.terms.value: "b"
+            partition.terms._count: 2
+            price.max.value: 2
+            "_rollup.id": "tz"
+            "_rollup.version": 2
+
+          - index:
+              _index: "tz_rollup"
+              _type: "_doc"
+          - timestamp.date_histogram.timestamp: 1531221600000
+            timestamp.date_histogram.interval: "5m"
+            timestamp.date_histogram.time_zone: "America/Edmonton"
+            timestamp.date_histogram._count: 10
+            partition.terms.value: "a"
+            partition.terms._count: 10
+            price.max.value: 3
+            "_rollup.id": "tz"
+            "_rollup.version": 2
+
+  - do:
+      rollup.rollup_search:
+        index: "tz_rollup"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "5m"
+                time_zone: "America/Edmonton"
+              aggs:
+                the_max:
+                  max:
+                    field: "price"
+
+  - length: { aggregations.histo.buckets: 3 }
+  - match: { aggregations.histo.buckets.0.key_as_string: "2018-07-10T05:10:00.000-06:00" }
+  - match: { aggregations.histo.buckets.0.doc_count: 1 }
+  - match: { aggregations.histo.buckets.0.the_max.value: 1 }
+  - match: { aggregations.histo.buckets.1.key_as_string: "2018-07-10T05:15:00.000-06:00" }
+  - match: { aggregations.histo.buckets.1.doc_count: 2 }
+  - match: { aggregations.histo.buckets.1.the_max.value: 2 }
+  - match: { aggregations.histo.buckets.2.key_as_string: "2018-07-10T05:20:00.000-06:00" }
+  - match: { aggregations.histo.buckets.2.doc_count: 10 }
+  - match: { aggregations.histo.buckets.2.the_max.value: 3 }
+
+  - do:
+      rollup.rollup_search:
+        index: "tz_rollup"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "5m"
+                time_zone: "Canada/Mountain"
+              aggs:
+                the_max:
+                  max:
+                    field: "price"
+
+  - length: { aggregations.histo.buckets: 3 }
+  - match: { aggregations.histo.buckets.0.key_as_string: "2018-07-10T05:10:00.000-06:00" }
+  - match: { aggregations.histo.buckets.0.doc_count: 1 }
+  - match: { aggregations.histo.buckets.0.the_max.value: 1 }
+  - match: { aggregations.histo.buckets.1.key_as_string: "2018-07-10T05:15:00.000-06:00" }
+  - match: { aggregations.histo.buckets.1.doc_count: 2 }
+  - match: { aggregations.histo.buckets.1.the_max.value: 2 }
+  - match: { aggregations.histo.buckets.2.key_as_string: "2018-07-10T05:20:00.000-06:00" }
+  - match: { aggregations.histo.buckets.2.doc_count: 10 }
+  - match: { aggregations.histo.buckets.2.the_max.value: 3 }
+
+---
+"Obsolete BWC Timezone":
+  - skip:
+        version: " - 7.0.99"
+        reason: "IANA TZ deprecations in 7.1"
+  - do:
+      indices.create:
+        index: tz_rollup
+        body:
+          settings:
+            number_of_shards: 1
+            number_of_replicas: 0
+          mappings:
+            properties:
+              partition.terms.value:
+                type: keyword
+              partition.terms._count:
+                type: long
+              timestamp.date_histogram.time_zone:
+                type: keyword
+              timestamp.date_histogram.interval:
+                type: keyword
+              timestamp.date_histogram.timestamp:
+                type: date
+              timestamp.date_histogram._count:
+                type: long
+              price.max.value:
+                type: double
+              _rollup.id:
+                type: keyword
+              _rollup.version:
+                type: long
+            _meta:
+              _rollup:
+                sensor:
+                  cron: "* * * * * ?"
+                  rollup_index: "tz_rollup"
+                  index_pattern: "tz"
+                  timeout: "20s"
+                  page_size: 1000
+                  groups:
+                    date_histogram:
+                      field: "timestamp"
+                      interval: "5m"
+                      time_zone: "Canada/Mountain"
+                    terms:
+                      fields:
+                        - "partition"
+                  id: tz
+                  metrics:
+                    - field: "price"
+                      metrics:
+                        - max
+
+  - do:
+      headers:
+        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: "tz_rollup"
+              _type: "_doc"
+          - timestamp.date_histogram.timestamp: 1531221000000
+            timestamp.date_histogram.interval: "5m"
+            timestamp.date_histogram.time_zone: "Canada/Mountain"
+            timestamp.date_histogram._count: 1
+            partition.terms.value: "a"
+            partition.terms._count: 1
+            price.max.value: 1
+            "_rollup.id": "tz"
+            "_rollup.version": 2
+
+          - index:
+              _index: "tz_rollup"
+              _type: "_doc"
+          - timestamp.date_histogram.timestamp: 1531221300000
+            timestamp.date_histogram.interval: "5m"
+            timestamp.date_histogram.time_zone: "Canada/Mountain"
+            timestamp.date_histogram._count: 2
+            partition.terms.value: "b"
+            partition.terms._count: 2
+            price.max.value: 2
+            "_rollup.id": "tz"
+            "_rollup.version": 2
+
+          - index:
+              _index: "tz_rollup"
+              _type: "_doc"
+          - timestamp.date_histogram.timestamp: 1531221600000
+            timestamp.date_histogram.interval: "5m"
+            timestamp.date_histogram.time_zone: "Canada/Mountain"
+            timestamp.date_histogram._count: 10
+            partition.terms.value: "a"
+            partition.terms._count: 10
+            price.max.value: 3
+            "_rollup.id": "tz"
+            "_rollup.version": 2
+
+  - do:
+      rollup.rollup_search:
+        index: "tz_rollup"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "5m"
+                time_zone: "America/Edmonton"
+              aggs:
+                the_max:
+                  max:
+                    field: "price"
+
+  - length: { aggregations.histo.buckets: 3 }
+  - match: { aggregations.histo.buckets.0.key_as_string: "2018-07-10T05:10:00.000-06:00" }
+  - match: { aggregations.histo.buckets.0.doc_count: 1 }
+  - match: { aggregations.histo.buckets.0.the_max.value: 1 }
+  - match: { aggregations.histo.buckets.1.key_as_string: "2018-07-10T05:15:00.000-06:00" }
+  - match: { aggregations.histo.buckets.1.doc_count: 2 }
+  - match: { aggregations.histo.buckets.1.the_max.value: 2 }
+  - match: { aggregations.histo.buckets.2.key_as_string: "2018-07-10T05:20:00.000-06:00" }
+  - match: { aggregations.histo.buckets.2.doc_count: 10 }
+  - match: { aggregations.histo.buckets.2.the_max.value: 3 }
+
+
+  - do:
+      rollup.rollup_search:
+        index: "tz_rollup"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "5m"
+                time_zone: "Canada/Mountain"
+              aggs:
+                the_max:
+                  max:
+                    field: "price"
+
+  - length: { aggregations.histo.buckets: 3 }
+  - match: { aggregations.histo.buckets.0.key_as_string: "2018-07-10T05:10:00.000-06:00" }
+  - match: { aggregations.histo.buckets.0.doc_count: 1 }
+  - match: { aggregations.histo.buckets.0.the_max.value: 1 }
+  - match: { aggregations.histo.buckets.1.key_as_string: "2018-07-10T05:15:00.000-06:00" }
+  - match: { aggregations.histo.buckets.1.doc_count: 2 }
+  - match: { aggregations.histo.buckets.1.the_max.value: 2 }
+  - match: { aggregations.histo.buckets.2.key_as_string: "2018-07-10T05:20:00.000-06:00" }
+  - match: { aggregations.histo.buckets.2.doc_count: 10 }
+  - match: { aggregations.histo.buckets.2.the_max.value: 3 }
+
 
 ---
 "Search with typed_keys":
@@ -914,3 +1216,5 @@ setup:
   - match: { aggregations.date_histogram#histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" }
   - match: { aggregations.date_histogram#histo.buckets.3.doc_count: 20 }
   - match: { aggregations.date_histogram#histo.buckets.3.max#the_max.value: 4 }
+
+

From 66366d0307e7255be31f0e30dabbcfd4c96a7a8e Mon Sep 17 00:00:00 2001
From: Gordon Brown <gordon.brown@elastic.co>
Date: Wed, 17 Apr 2019 13:42:36 -0600
Subject: [PATCH 068/112] Extract template management from Watcher (#41169)

This commit extracts the template management from Watcher into an
abstract class, so that templates and lifecycle policies can be managed
in the same way across multiple plugins. This will be useful for SLM, as
well as potentially ILM and any other plugins which need to manage index
templates.
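
As a sketch of how a plugin is expected to hook into the new registry (the
class name, template name, resource file, and version property below are
hypothetical and not part of this change), a plugin subclasses
IndexTemplateRegistry and only declares the resources it owns:

    import java.util.Collections;
    import java.util.List;

    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.service.ClusterService;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.xpack.core.template.IndexTemplateConfig;
    import org.elasticsearch.xpack.core.template.IndexTemplateRegistry;
    import org.elasticsearch.xpack.core.template.LifecyclePolicyConfig;

    public class MyPluginTemplateRegistry extends IndexTemplateRegistry {

        // hypothetical template: name, resource file, version string, and version property placeholder
        private static final IndexTemplateConfig MY_TEMPLATE = new IndexTemplateConfig(
            "my-plugin-template", "/my-plugin-template.json", "1", "xpack.my_plugin.template.version");

        public MyPluginTemplateRegistry(Settings nodeSettings, ClusterService clusterService, ThreadPool threadPool,
                                        Client client, NamedXContentRegistry xContentRegistry) {
            // the superclass registers this instance as a ClusterStateListener and installs
            // any missing templates and policies once the cluster state allows it
            super(nodeSettings, clusterService, threadPool, client, xContentRegistry);
        }

        @Override
        protected List<IndexTemplateConfig> getTemplateConfigs() {
            return Collections.singletonList(MY_TEMPLATE);
        }

        @Override
        protected List<LifecyclePolicyConfig> getPolicyConfigs() {
            return Collections.emptyList(); // this example plugin manages no ILM policies
        }

        @Override
        protected String getOrigin() {
            return "my_plugin"; // origin attached to the client requests issued by the registry
        }
    }

Instantiating the subclass is all that is needed; the base class listens for
cluster state changes and puts any missing templates and lifecycle policies.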
---
 .../core/template/IndexTemplateConfig.java    |  62 +++++
 .../core/template/IndexTemplateRegistry.java  | 229 ++++++++++++++++
 .../core/template/LifecyclePolicyConfig.java  |  44 +++
 .../support/WatcherIndexTemplateRegistry.java | 255 ++++--------------
 .../WatcherIndexTemplateRegistryTests.java    |  15 +-
 5 files changed, 396 insertions(+), 209 deletions(-)
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateConfig.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/LifecyclePolicyConfig.java

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateConfig.java
new file mode 100644
index 00000000000..5eb219f11a7
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateConfig.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.template;
+
+import java.nio.charset.StandardCharsets;
+import java.util.regex.Pattern;
+
+/**
+ * Describes an index template to be loaded from a resource file for use with an {@link IndexTemplateRegistry}.
+ */
+public class IndexTemplateConfig {
+
+    private final String templateName;
+    private final String fileName;
+    private final String version;
+    private final String versionProperty;
+
+    /**
+     * Describes a template to be loaded from a resource file. Includes handling for substituting a version property into the template.
+     *
+     * The {@code versionProperty} parameter will be used to substitute the value of {@code version} into the template. For example,
+     * this template:
+     * {@code {"myTemplateVersion": "${my.version.property}"}}
+     * with {@code version = "42"; versionProperty = "my.version.property"} results in {@code {"myTemplateVersion": "42"}}.
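+     *
+     * As an illustrative sketch (the template name, file, and property below are hypothetical), the substitution is applied
+     * when the template is loaded:
+     * <pre>{@code
+     * IndexTemplateConfig config = new IndexTemplateConfig("my-template", "/my-template.json", "42", "my.version.property");
+     * byte[] source = config.loadBytes(); // occurrences of "${my.version.property}" are replaced with "42"
+     * }</pre>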
+     *
+     * @param templateName The name that will be used for the index template. Literal; include the version in this string if
+     *                     it should be used.
+     * @param fileName The filename the template should be loaded from. Literal; should include the leading {@literal /} and
+     *                 extension if necessary.
+     * @param version The version of the template. Substituted for {@code versionProperty} as described above.
+     * @param versionProperty The property that will be replaced with the {@code version} string as described above.
+     */
+    public IndexTemplateConfig(String templateName, String fileName, String version, String versionProperty) {
+        this.templateName = templateName;
+        this.fileName = fileName;
+        this.version = version;
+        this.versionProperty = versionProperty;
+    }
+
+    public String getFileName() {
+        return fileName;
+    }
+
+    public String getTemplateName() {
+        return templateName;
+    }
+
+    /**
+     * Loads the template from disk as a UTF-8 byte array.
+     * @return The template as a UTF-8 byte array.
+     */
+    public byte[] loadBytes() {
+        String template = TemplateUtils.loadTemplate(fileName, version,
+            Pattern.quote("${" + versionProperty + "}"));
+        assert template != null && template.length() > 0;
+        return template.getBytes(StandardCharsets.UTF_8);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java
new file mode 100644
index 00000000000..d0a086bd649
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java
@@ -0,0 +1,229 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.template;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.core.XPackClient;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
+import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction;
+
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
+
+/**
+ * Abstracts the logic of managing versioned index templates and lifecycle policies for plugins that require such things.
+ */
+public abstract class IndexTemplateRegistry implements ClusterStateListener {
+    private static final Logger logger = LogManager.getLogger(IndexTemplateRegistry.class);
+
+    protected final Settings settings;
+    protected final Client client;
+    protected final ThreadPool threadPool;
+    protected final NamedXContentRegistry xContentRegistry;
+    protected final ConcurrentMap<String, AtomicBoolean> templateCreationsInProgress = new ConcurrentHashMap<>();
+    protected final ConcurrentMap<String, AtomicBoolean> policyCreationsInProgress = new ConcurrentHashMap<>();
+
+    public IndexTemplateRegistry(Settings nodeSettings, ClusterService clusterService, ThreadPool threadPool, Client client,
+                                 NamedXContentRegistry xContentRegistry) {
+        this.settings = nodeSettings;
+        this.client = client;
+        this.threadPool = threadPool;
+        this.xContentRegistry = xContentRegistry;
+        clusterService.addListener(this);
+    }
+
+    /**
+     * Retrieves a list of {@link IndexTemplateConfig} that represents
+     * the index templates that should be installed and managed.
+     * @return The configurations for the templates that should be installed.
+     */
+    protected abstract List<IndexTemplateConfig> getTemplateConfigs();
+
+    /**
+     * Retrieves a list of {@link LifecyclePolicyConfig} that represents the ILM
+     * policies that should be installed and managed. Only called if ILM is enabled.
+     * @return The configurations for the lifecycle policies that should be installed.
+     */
+    protected abstract List<LifecyclePolicyConfig> getPolicyConfigs();
+
+    /**
+     * Retrieves an identifier for the plugin that manages these templates and policies. It is also used as the origin for the
+     * client requests issued by this registry.
+     * @return A string ID for the plugin managing these templates.
+     */
+    protected abstract String getOrigin();
+
+    /**
+     * Called when creation of an index template fails.
+     * @param config The template config that failed to be created.
+     * @param e The exception that caused the failure.
+     */
+    protected void onPutTemplateFailure(IndexTemplateConfig config, Exception e) {
+        logger.error(new ParameterizedMessage("error adding index template [{}] from [{}] for [{}]",
+            config.getTemplateName(), config.getFileName(), getOrigin()), e);
+    }
+
+    /**
+     * Called when creation of a lifecycle policy fails.
+     * @param policy The lifecycle policy that failed to be created.
+     * @param e The exception that caused the failure.
+     */
+    protected void onPutPolicyFailure(LifecyclePolicy policy, Exception e) {
+        logger.error(new ParameterizedMessage("error adding lifecycle policy [{}] for [{}]",
+            policy.getName(), getOrigin()), e);
+    }
+
+    @Override
+    public void clusterChanged(ClusterChangedEvent event) {
+        ClusterState state = event.state();
+        if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
+            // wait until the gateway has recovered from disk, otherwise we may think the index templates do not exist
+            // while they actually do
+            return;
+        }
+
+        // no master node, exit immediately
+        DiscoveryNode masterNode = event.state().getNodes().getMasterNode();
+        if (masterNode == null) {
+            return;
+        }
+
+        // if this node is newer than the master node, we probably need to add the template, which might be newer than the
+        // template the master node has, so we may need to add new templates even though this node is not the master node
+        DiscoveryNode localNode = event.state().getNodes().getLocalNode();
+        boolean localNodeVersionAfterMaster = localNode.getVersion().after(masterNode.getVersion());
+
+        if (event.localNodeMaster() || localNodeVersionAfterMaster) {
+            addTemplatesIfMissing(state);
+            addIndexLifecyclePoliciesIfMissing(state);
+        }
+    }
+
+    private void addTemplatesIfMissing(ClusterState state) {
+        final List<IndexTemplateConfig> indexTemplates = getTemplateConfigs();
+        for (IndexTemplateConfig template : indexTemplates) {
+            final String templateName = template.getTemplateName();
+            final AtomicBoolean creationCheck = templateCreationsInProgress.computeIfAbsent(templateName, key -> new AtomicBoolean(false));
+            if (creationCheck.compareAndSet(false, true)) {
+                if (!state.metaData().getTemplates().containsKey(templateName)) {
+                    logger.debug("adding index template [{}] for [{}], because it doesn't exist", templateName, getOrigin());
+                    putTemplate(template, creationCheck);
+                } else {
+                    creationCheck.set(false);
+                    logger.trace("not adding index template [{}] for [{}], because it already exists", templateName, getOrigin());
+                }
+            }
+        }
+    }
+
+    private void putTemplate(final IndexTemplateConfig config, final AtomicBoolean creationCheck) {
+        final Executor executor = threadPool.generic();
+        executor.execute(() -> {
+            final String templateName = config.getTemplateName();
+
+            PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).source(config.loadBytes(), XContentType.JSON);
+            request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
+            executeAsyncWithOrigin(client.threadPool().getThreadContext(), getOrigin(), request,
+                new ActionListener<AcknowledgedResponse>() {
+                    @Override
+                    public void onResponse(AcknowledgedResponse response) {
+                        creationCheck.set(false);
+                        if (response.isAcknowledged() == false) {
+                            logger.error("error adding index template [{}] for [{}], request was not acknowledged",
+                                templateName, getOrigin());
+                        }
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        creationCheck.set(false);
+                        onPutTemplateFailure(config, e);
+                    }
+                }, client.admin().indices()::putTemplate);
+        });
+    }
+
+    private void addIndexLifecyclePoliciesIfMissing(ClusterState state) {
+        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings);
+
+        if (ilmSupported) {
+            Optional<IndexLifecycleMetadata> maybeMeta = Optional.ofNullable(state.metaData().custom(IndexLifecycleMetadata.TYPE));
+            List<LifecyclePolicy> policies = getPolicyConfigs().stream()
+                .map(policyConfig -> policyConfig.load(xContentRegistry))
+                .collect(Collectors.toList());
+
+            for (LifecyclePolicy policy : policies) {
+                final AtomicBoolean creationCheck = policyCreationsInProgress.computeIfAbsent(policy.getName(),
+                    key -> new AtomicBoolean(false));
+                if (creationCheck.compareAndSet(false, true)) {
+                    final boolean policyNeedsToBeCreated = maybeMeta
+                        .flatMap(ilmMeta -> Optional.ofNullable(ilmMeta.getPolicies().get(policy.getName())))
+                        .isPresent() == false;
+                    if (policyNeedsToBeCreated) {
+                        logger.debug("adding lifecycle policy [{}] for [{}], because it doesn't exist", policy.getName(), getOrigin());
+                        putPolicy(policy, creationCheck);
+                    } else {
+                        logger.trace("not adding lifecycle policy [{}] for [{}], because it already exists",
+                            policy.getName(), getOrigin());
+                        creationCheck.set(false);
+                    }
+                }
+            }
+        }
+    }
+
+    private void putPolicy(final LifecyclePolicy policy, final AtomicBoolean creationCheck) {
+        final Executor executor = threadPool.generic();
+        executor.execute(() -> {
+            PutLifecycleAction.Request request = new PutLifecycleAction.Request(policy);
+            request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
+            executeAsyncWithOrigin(client.threadPool().getThreadContext(), getOrigin(), request,
+                new ActionListener<PutLifecycleAction.Response>() {
+                    @Override
+                    public void onResponse(PutLifecycleAction.Response response) {
+                        creationCheck.set(false);
+                        if (response.isAcknowledged() == false) {
+                            logger.error("error adding lifecycle policy [{}] for [{}], request was not acknowledged",
+                                policy.getName(), getOrigin());
+                        }
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        creationCheck.set(false);
+                        onPutPolicyFailure(policy, e);
+                    }
+                }, (req, listener) -> new XPackClient(client).ilmClient().putLifecyclePolicy(req, listener));
+        });
+    }
+
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/LifecyclePolicyConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/LifecyclePolicyConfig.java
new file mode 100644
index 00000000000..c27b262f9f1
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/LifecyclePolicyConfig.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.template;
+
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyUtils;
+
+/**
+ * Describes an index lifecycle policy to be loaded from a resource file for use with an {@link IndexTemplateRegistry}.
+ */
+public class LifecyclePolicyConfig {
+
+    private final String policyName;
+    private final String fileName;
+
+    /**
+     * Describes a lifecycle policy definition to be loaded from a resource file.
+     *
+     * @param policyName The name that will be used for the policy.
+     * @param fileName The filename the policy definition should be loaded from. Literal; should include the leading {@literal /} and
+     *                 extension if necessary.
+     */
+    public LifecyclePolicyConfig(String policyName, String fileName) {
+        this.policyName = policyName;
+        this.fileName = fileName;
+    }
+
+    public String getPolicyName() {
+        return policyName;
+    }
+
+    public String getFileName() {
+        return fileName;
+    }
+
+    public LifecyclePolicy load(NamedXContentRegistry xContentRegistry) {
+        return LifecyclePolicyUtils.loadPolicy(policyName, fileName, xContentRegistry);
+    }
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java
index 4ebcc5a8f41..9f5027f7a0f 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java
@@ -5,240 +5,83 @@
  */
 package org.elasticsearch.xpack.watcher.support;
 
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.XPackClient;
 import org.elasticsearch.xpack.core.XPackSettings;
-import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
-import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
-import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyUtils;
-import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction;
-import org.elasticsearch.xpack.core.template.TemplateUtils;
+import org.elasticsearch.xpack.core.template.IndexTemplateConfig;
+import org.elasticsearch.xpack.core.template.IndexTemplateRegistry;
+import org.elasticsearch.xpack.core.template.LifecyclePolicyConfig;
 import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField;
 
-import java.nio.charset.StandardCharsets;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.regex.Pattern;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
 
 import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN;
-import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
 
-public class WatcherIndexTemplateRegistry implements ClusterStateListener {
+public class WatcherIndexTemplateRegistry extends IndexTemplateRegistry {
 
-    public static final TemplateConfig TEMPLATE_CONFIG_TRIGGERED_WATCHES = new TemplateConfig(
-            WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, "triggered-watches");
-    public static final TemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY = new TemplateConfig(
-        WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, "watch-history");
-    public static final TemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY_NO_ILM = new TemplateConfig(
-        WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME_NO_ILM, "watch-history-no-ilm");
-    public static final TemplateConfig TEMPLATE_CONFIG_WATCHES = new TemplateConfig(
-            WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME, "watches");
-    public static final TemplateConfig[] TEMPLATE_CONFIGS = new TemplateConfig[]{
-        TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY, TEMPLATE_CONFIG_WATCHES
-    };
-    public static final TemplateConfig[] TEMPLATE_CONFIGS_NO_ILM = new TemplateConfig[]{
-        TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY_NO_ILM, TEMPLATE_CONFIG_WATCHES
-    };
+    public static final String WATCHER_TEMPLATE_VERSION_VARIABLE = "xpack.watcher.template.version";
+    public static final IndexTemplateConfig TEMPLATE_CONFIG_TRIGGERED_WATCHES = new IndexTemplateConfig(
+        WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME,
+        "/triggered-watches.json",
+        WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION,
+        WATCHER_TEMPLATE_VERSION_VARIABLE);
+    public static final IndexTemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY = new IndexTemplateConfig(
+        WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME,
+        "/watch-history.json",
+        WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION,
+        WATCHER_TEMPLATE_VERSION_VARIABLE);
+    public static final IndexTemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY_NO_ILM = new IndexTemplateConfig(
+        WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME_NO_ILM,
+        "/watch-history-no-ilm.json",
+        WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION,
+        WATCHER_TEMPLATE_VERSION_VARIABLE);
+    public static final IndexTemplateConfig TEMPLATE_CONFIG_WATCHES = new IndexTemplateConfig(
+        WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME,
+        "/watches.json",
+        WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION,
+        WATCHER_TEMPLATE_VERSION_VARIABLE);
 
-    public static final PolicyConfig POLICY_WATCH_HISTORY = new PolicyConfig("watch-history-ilm-policy", "/watch-history-ilm-policy.json");
+    public static final LifecyclePolicyConfig POLICY_WATCH_HISTORY = new LifecyclePolicyConfig("watch-history-ilm-policy",
+        "/watch-history-ilm-policy.json");
 
-    private static final Logger logger = LogManager.getLogger(WatcherIndexTemplateRegistry.class);
+    private final List<IndexTemplateConfig> templatesToUse;
 
-    private final Settings nodeSettings;
-    private final Client client;
-    private final ThreadPool threadPool;
-    private final NamedXContentRegistry xContentRegistry;
-    private final ConcurrentMap<String, AtomicBoolean> templateCreationsInProgress = new ConcurrentHashMap<>();
-    private final AtomicBoolean historyPolicyCreationInProgress = new AtomicBoolean();
-
-    public WatcherIndexTemplateRegistry(Settings nodeSettings, ClusterService clusterService,
-                                        ThreadPool threadPool, Client client,
+    public WatcherIndexTemplateRegistry(Settings nodeSettings, ClusterService clusterService, ThreadPool threadPool, Client client,
                                         NamedXContentRegistry xContentRegistry) {
-        this.nodeSettings = nodeSettings;
-        this.client = client;
-        this.threadPool = threadPool;
-        this.xContentRegistry = xContentRegistry;
-        clusterService.addListener(this);
+        super(nodeSettings, clusterService, threadPool, client, xContentRegistry);
+        boolean ilmEnabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings);
+        templatesToUse = Arrays.asList(
+            ilmEnabled ? TEMPLATE_CONFIG_WATCH_HISTORY : TEMPLATE_CONFIG_WATCH_HISTORY_NO_ILM,
+            TEMPLATE_CONFIG_TRIGGERED_WATCHES,
+            TEMPLATE_CONFIG_WATCHES
+        );
     }
 
     @Override
-    public void clusterChanged(ClusterChangedEvent event) {
-        ClusterState state = event.state();
-        if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
-            // wait until the gateway has recovered from disk, otherwise we think may not have the index templates,
-            // while they actually do exist
-            return;
-        }
-
-        // no master node, exit immediately
-        DiscoveryNode masterNode = event.state().getNodes().getMasterNode();
-        if (masterNode == null) {
-            return;
-        }
-
-        // if this node is newer than the master node, we probably need to add the history template, which might be newer than the
-        // history template the master node has, so we need potentially add new templates despite being not the master node
-        DiscoveryNode localNode = event.state().getNodes().getLocalNode();
-        boolean localNodeVersionAfterMaster = localNode.getVersion().after(masterNode.getVersion());
-
-        if (event.localNodeMaster() || localNodeVersionAfterMaster) {
-            addTemplatesIfMissing(state);
-            addIndexLifecyclePolicyIfMissing(state);
-        }
+    protected List<IndexTemplateConfig> getTemplateConfigs() {
+        return templatesToUse;
     }
 
-    private void addTemplatesIfMissing(ClusterState state) {
-        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(this.nodeSettings);
-        final TemplateConfig[] indexTemplates = ilmSupported ? TEMPLATE_CONFIGS : TEMPLATE_CONFIGS_NO_ILM;
-        for (TemplateConfig template : indexTemplates) {
-            final String templateName = template.getTemplateName();
-            final AtomicBoolean creationCheck = templateCreationsInProgress.computeIfAbsent(templateName, key -> new AtomicBoolean(false));
-            if (creationCheck.compareAndSet(false, true)) {
-                if (!state.metaData().getTemplates().containsKey(templateName)) {
-                    logger.debug("adding index template [{}], because it doesn't exist", templateName);
-                    putTemplate(template, creationCheck);
-                } else {
-                    creationCheck.set(false);
-                    logger.trace("not adding index template [{}], because it already exists", templateName);
-                }
-            }
-        }
+    @Override
+    protected List<LifecyclePolicyConfig> getPolicyConfigs() {
+        return Collections.singletonList(POLICY_WATCH_HISTORY);
     }
 
-    private void putTemplate(final TemplateConfig config, final AtomicBoolean creationCheck) {
-        final Executor executor = threadPool.generic();
-        executor.execute(() -> {
-            final String templateName = config.getTemplateName();
-
-            PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).source(config.load(), XContentType.JSON);
-            request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
-            executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, request,
-                    new ActionListener<AcknowledgedResponse>() {
-                        @Override
-                        public void onResponse(AcknowledgedResponse response) {
-                            creationCheck.set(false);
-                            if (response.isAcknowledged() == false) {
-                                logger.error("Error adding watcher template [{}], request was not acknowledged", templateName);
-                            }
-                        }
-
-                        @Override
-                        public void onFailure(Exception e) {
-                            creationCheck.set(false);
-                            logger.error(new ParameterizedMessage("Error adding watcher template [{}]", templateName), e);
-                        }
-                    }, client.admin().indices()::putTemplate);
-        });
-    }
-
-    // Package visible for testing
-    LifecyclePolicy loadWatcherHistoryPolicy() {
-        return LifecyclePolicyUtils.loadPolicy(POLICY_WATCH_HISTORY.policyName, POLICY_WATCH_HISTORY.fileName, xContentRegistry);
-    }
-
-    private void addIndexLifecyclePolicyIfMissing(ClusterState state) {
-        boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(this.nodeSettings);
-        if (ilmSupported && historyPolicyCreationInProgress.compareAndSet(false, true)) {
-            final LifecyclePolicy policyOnDisk = loadWatcherHistoryPolicy();
-
-            Optional<IndexLifecycleMetadata> maybeMeta = Optional.ofNullable(state.metaData().custom(IndexLifecycleMetadata.TYPE));
-            final boolean needsUpdating = maybeMeta
-                .flatMap(ilmMeta -> Optional.ofNullable(ilmMeta.getPolicies().get(policyOnDisk.getName())))
-                .isPresent() == false; // If there is no policy then one needs to be put;
-
-            if (needsUpdating) {
-                putPolicy(policyOnDisk, historyPolicyCreationInProgress);
-            } else {
-                historyPolicyCreationInProgress.set(false);
-            }
-        }
-    }
-
-    private void putPolicy(final LifecyclePolicy policy, final AtomicBoolean creationCheck) {
-        final Executor executor = threadPool.generic();
-        executor.execute(() -> {
-            PutLifecycleAction.Request request = new PutLifecycleAction.Request(policy);
-            request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
-            executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, request,
-                new ActionListener<PutLifecycleAction.Response>() {
-                    @Override
-                    public void onResponse(PutLifecycleAction.Response response) {
-                        creationCheck.set(false);
-                        if (response.isAcknowledged() == false) {
-                            logger.error("error adding watcher index lifecycle policy [{}], request was not acknowledged",
-                                policy.getName());
-                        }
-                    }
-
-                    @Override
-                    public void onFailure(Exception e) {
-                        creationCheck.set(false);
-                        logger.error(new ParameterizedMessage("error adding watcher index lifecycle policy [{}]",
-                            policy.getName()), e);
-                    }
-                }, (req, listener) -> new XPackClient(client).ilmClient().putLifecyclePolicy(req, listener));
-        });
+    @Override
+    protected String getOrigin() {
+        return WATCHER_ORIGIN;
     }
 
     public static boolean validate(ClusterState state) {
         return state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME) &&
-                state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME) &&
-                state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME);
+            state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME) &&
+            state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME);
     }
 
-    public static class TemplateConfig {
-
-        private final String templateName;
-        private String fileName;
-
-        TemplateConfig(String templateName, String fileName) {
-            this.templateName = templateName;
-            this.fileName = fileName;
-        }
-
-        public String getFileName() {
-            return fileName;
-        }
-
-        public String getTemplateName() {
-            return templateName;
-        }
-
-        public byte[] load() {
-            String template = TemplateUtils.loadTemplate("/" + fileName + ".json", WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION,
-                Pattern.quote("${xpack.watcher.template.version}"));
-            assert template != null && template.length() > 0;
-            return template.getBytes(StandardCharsets.UTF_8);
-        }
-    }
-    public static class PolicyConfig {
-
-        private final String policyName;
-        private String fileName;
-
-        PolicyConfig(String templateName, String fileName) {
-            this.policyName = templateName;
-            this.fileName = fileName;
-        }
-    }
 }
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
index bd55e757953..a96c04ab7cd 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
@@ -59,6 +59,7 @@ import java.util.stream.Collectors;
 import static org.elasticsearch.mock.orig.Mockito.verify;
 import static org.elasticsearch.mock.orig.Mockito.when;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyObject;
@@ -164,7 +165,11 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
         DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
 
         Map<String, LifecyclePolicy> policyMap = new HashMap<>();
-        LifecyclePolicy policy = registry.loadWatcherHistoryPolicy();
+        List<LifecyclePolicy> policies = registry.getPolicyConfigs().stream()
+            .map(policyConfig -> policyConfig.load(xContentRegistry))
+            .collect(Collectors.toList());
+        assertThat(policies, hasSize(1));
+        LifecyclePolicy policy = policies.get(0);
         policyMap.put(policy.getName(), policy);
         ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), policyMap, nodes);
         registry.clusterChanged(event);
@@ -183,13 +188,17 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase {
         verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject());
     }
 
-    public void testPolicyAlreadyExistsButDiffers() throws IOException  {
+    public void testPolicyAlreadyExistsButDiffers() throws IOException {
         DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT);
         DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
 
         Map<String, LifecyclePolicy> policyMap = new HashMap<>();
         String policyStr = "{\"phases\":{\"delete\":{\"min_age\":\"1m\",\"actions\":{\"delete\":{}}}}}";
-        LifecyclePolicy policy = registry.loadWatcherHistoryPolicy();
+        List<LifecyclePolicy> policies = registry.getPolicyConfigs().stream()
+            .map(policyConfig -> policyConfig.load(xContentRegistry))
+            .collect(Collectors.toList());
+        assertThat(policies, hasSize(1));
+        LifecyclePolicy policy = policies.get(0);
         try (XContentParser parser = XContentType.JSON.xContent()
             .createParser(xContentRegistry, LoggingDeprecationHandler.THROW_UNSUPPORTED_OPERATION, policyStr)) {
             LifecyclePolicy different = LifecyclePolicy.parse(parser, policy.getName());

From 946baf87d31e22cffda6a20d6f5a0bd1f779918c Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Wed, 17 Apr 2019 19:54:55 +0100
Subject: [PATCH 069/112] Assert TransportReplicationActions acquire permits
 (#41271)

Today we do not distinguish "no operations in flight" from "operations are
blocked", since both return `0` from `IndexShard#getActiveOperationsCount()`.
We therefore cannot assert that every `TransportReplicationAction` performs its
actions under permit(s). This commit fixes this by returning
`IndexShard#OPERATIONS_BLOCKED` if operations are blocked, allowing these two
cases to be distinguished.
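
As a minimal illustration of the new contract (only `OPERATIONS_BLOCKED` and
the changed return value are part of this change; the helper class below is
hypothetical):

    import org.elasticsearch.index.shard.IndexShard;

    class ActiveOperationsDescriber {
        // Sketch: interpreting getActiveOperationsCount() under the new contract.
        static String describe(IndexShard shard) {
            final int activeOps = shard.getActiveOperationsCount();
            if (activeOps == IndexShard.OPERATIONS_BLOCKED) {
                return "operations blocked";       // all permits held, e.g. while the shard is closing or relocating
            } else if (activeOps == 0) {
                return "no operations in flight";  // previously indistinguishable from the blocked case
            } else {
                return activeOps + " operations currently hold permits";
            }
        }
    }

This is what lets the new assertions require
`getActiveOperationsCount() != 0`: a shard whose operations are blocked now
reports `OPERATIONS_BLOCKED` (-1) instead of `0`, while an operation running
under a permit reports a positive count.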
---
 ...TransportVerifyShardBeforeCloseAction.java |  4 ++--
 .../TransportReplicationAction.java           |  2 ++
 .../elasticsearch/index/shard/IndexShard.java | 17 ++++++++++----
 .../shard/IndexShardOperationPermits.java     | 11 +++-------
 .../indices/flush/SyncedFlushService.java     |  3 ++-
 ...portVerifyShardBeforeCloseActionTests.java |  8 +++----
 ...TransportResyncReplicationActionTests.java |  7 +++++-
 .../TransportReplicationActionTests.java      | 22 ++++++++++++-------
 ...ReplicationAllPermitsAcquisitionTests.java |  7 +++---
 .../index/shard/IndexShardIT.java             | 14 +++++++++++-
 .../IndexShardOperationPermitsTests.java      |  4 ++--
 .../index/shard/IndexShardTests.java          | 17 +++++++++++---
 12 files changed, 79 insertions(+), 37 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
index 7d691717de1..22a0777f7bf 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
@@ -101,8 +101,8 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA
 
     private void executeShardOperation(final ShardRequest request, final IndexShard indexShard) {
         final ShardId shardId = indexShard.shardId();
-        if (indexShard.getActiveOperationsCount() != 0) {
-            throw new IllegalStateException("On-going operations in progress while checking index shard " + shardId + " before closing");
+        if (indexShard.getActiveOperationsCount() != IndexShard.OPERATIONS_BLOCKED) {
+            throw new IllegalStateException("Index shard " + shardId + " is not blocking all operations during closing");
         }
 
         final ClusterBlocks clusterBlocks = clusterService.state().blocks();
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 92687d4880e..80fd7162f3d 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -514,6 +514,7 @@ public abstract class TransportReplicationAction<
         @Override
         public void onResponse(Releasable releasable) {
             try {
+                assert replica.getActiveOperationsCount() != 0 : "must perform shard operation under a permit";
                 final ReplicaResult replicaResult = shardOperationOnReplica(replicaRequest.getRequest(), replica);
                 releasable.close(); // release shard operation lock before responding to caller
                 final TransportReplicationAction.ReplicaResponse response =
@@ -913,6 +914,7 @@ public abstract class TransportReplicationAction<
                     return result;
                 });
             }
+            assert indexShard.getActiveOperationsCount() != 0 : "must perform shard operation under a permit";
             shardOperationOnPrimary(request, indexShard, listener);
         }
 
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index f9db0b15e7a..d1b5a25db6d 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -636,7 +636,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
                 forceRefreshes.close();
                 // no shard operation permits are being held here, move state from started to relocated
-                assert indexShardOperationPermits.getActiveOperationsCount() == 0 :
+                assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED :
                         "in-flight operations in progress while moving shard state to relocated";
                 /*
                  * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a
@@ -1553,7 +1553,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 assert assertReplicationTarget();
             } else {
                 assert origin == Engine.Operation.Origin.LOCAL_RESET;
-                assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]";
+                assert getActiveOperationsCount() == OPERATIONS_BLOCKED
+                    : "locally resetting without blocking operations, active operations are [" + getActiveOperations() + "]";
             }
             if (writeAllowedStates.contains(state) == false) {
                 throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " +
@@ -2747,8 +2748,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return (opPrimaryTerm > pendingPrimaryTerm) || (allPermits && opPrimaryTerm > getOperationPrimaryTerm());
     }
 
+    public static final int OPERATIONS_BLOCKED = -1;
+
+    /**
+     * Obtain the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} if all permits are held (even if there are
+     * outstanding operations in flight).
+     *
+     * @return the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} when all permits are held.
+     */
     public int getActiveOperationsCount() {
-        // refCount is incremented on successful acquire and decremented on close
         return indexShardOperationPermits.getActiveOperationsCount();
     }
 
@@ -3076,7 +3084,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
      * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
      */
     void resetEngineToGlobalCheckpoint() throws IOException {
-        assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]";
+        assert getActiveOperationsCount() == OPERATIONS_BLOCKED
+            : "resetting engine without blocking operations; active operations are [" + getActiveOperations() + ']';
         sync(); // persist the global checkpoint to disk
         final SeqNoStats seqNoStats = seqNoStats();
         final TranslogStats translogStats = translogStats();
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
index fe7a5392a08..672e69743d4 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
@@ -293,19 +293,14 @@ final class IndexShardOperationPermits implements Closeable {
     }
 
     /**
-     * Obtain the active operation count, or zero if all permits are held (even if there are outstanding operations in flight).
+     * Obtain the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} if all permits are held.
      *
-     * @return the active operation count, or zero when all permits are held
+     * @return the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} when all permits are held.
      */
     int getActiveOperationsCount() {
         int availablePermits = semaphore.availablePermits();
         if (availablePermits == 0) {
-            /*
-             * This occurs when either doBlockOperations is holding all the permits or there are outstanding operations in flight and the
-             * remainder of the permits are held by doBlockOperations. We do not distinguish between these two cases and simply say that
-             * the active operations count is zero.
-             */
-            return 0;
+            return IndexShard.OPERATIONS_BLOCKED; // This occurs when blockOperations() has acquired all the permits.
         } else {
             return TOTAL_PERMITS - availablePermits;
         }
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
index 0423559aaf5..79a2d6c3c0a 100644
--- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
+++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
@@ -538,7 +538,7 @@ public class SyncedFlushService implements IndexEventListener {
             throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
         }
         int opCount = indexShard.getActiveOperationsCount();
-        return new InFlightOpsResponse(opCount);
+        return new InFlightOpsResponse(opCount == IndexShard.OPERATIONS_BLOCKED ? 0 : opCount);
     }
 
     public static final class PreShardSyncedFlushRequest extends TransportRequest {
@@ -781,6 +781,7 @@ public class SyncedFlushService implements IndexEventListener {
         }
 
         InFlightOpsResponse(int opCount) {
+            assert opCount >= 0 : opCount;
             this.opCount = opCount;
         }
 
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
index dcdfbb755ba..cd4d8ae6857 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
@@ -100,7 +100,7 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase {
         super.setUp();
 
         indexShard = mock(IndexShard.class);
-        when(indexShard.getActiveOperationsCount()).thenReturn(0);
+        when(indexShard.getActiveOperationsCount()).thenReturn(IndexShard.OPERATIONS_BLOCKED);
 
         final ShardId shardId = new ShardId("index", "_na_", randomIntBetween(0, 3));
         when(indexShard.shardId()).thenReturn(shardId);
@@ -165,12 +165,12 @@ public class TransportVerifyShardBeforeCloseActionTests extends ESTestCase {
         assertThat(flushRequest.getValue().force(), is(true));
     }
 
-    public void testOperationFailsWithOnGoingOps() {
-        when(indexShard.getActiveOperationsCount()).thenReturn(randomIntBetween(1, 10));
+    public void testOperationFailsWhenNotBlocked() {
+        when(indexShard.getActiveOperationsCount()).thenReturn(randomIntBetween(0, 10));
 
         IllegalStateException exception = expectThrows(IllegalStateException.class, this::executeOnPrimaryOrReplica);
         assertThat(exception.getMessage(),
-            equalTo("On-going operations in progress while checking index shard " + indexShard.shardId() + " before closing"));
+            equalTo("Index shard " + indexShard.shardId() + " is not blocking all operations during closing"));
         verify(indexShard, times(0)).flush(any(FlushRequest.class));
     }
 
diff --git a/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java
index ed3663ed18d..167518f4fc9 100644
--- a/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java
@@ -58,6 +58,7 @@ import java.nio.charset.Charset;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import static java.util.Collections.emptyList;
 import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
@@ -118,13 +119,17 @@ public class TransportResyncReplicationActionTests extends ESTestCase {
                 final String allocationId = primaryShardRouting.allocationId().getId();
                 final long primaryTerm = indexMetaData.primaryTerm(shardId.id());
 
+                final AtomicInteger acquiredPermits = new AtomicInteger();
                 final IndexShard indexShard = mock(IndexShard.class);
                 when(indexShard.shardId()).thenReturn(shardId);
                 when(indexShard.routingEntry()).thenReturn(primaryShardRouting);
                 when(indexShard.getPendingPrimaryTerm()).thenReturn(primaryTerm);
+                when(indexShard.getOperationPrimaryTerm()).thenReturn(primaryTerm);
+                when(indexShard.getActiveOperationsCount()).then(i -> acquiredPermits.get());
                 doAnswer(invocation -> {
                     ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[0];
-                    callback.onResponse(() -> logger.trace("released"));
+                    acquiredPermits.incrementAndGet();
+                    callback.onResponse(acquiredPermits::decrementAndGet);
                     return null;
                 }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject());
                 when(indexShard.getReplicationGroup()).thenReturn(
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index ccb23a9111a..12cc9097b65 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -87,6 +87,7 @@ import org.elasticsearch.transport.nio.MockNioTransport;
 import org.hamcrest.Matcher;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
@@ -678,16 +679,17 @@ public class TransportReplicationActionTests extends ESTestCase {
         };
         TestAction.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable);
         final Request request = new Request(NO_SHARD_ID);
-        primary.perform(request, ActionTestUtils.assertNoFailureListener(r -> {
-            final ElasticsearchException exception = new ElasticsearchException("testing");
-            primary.failShard("test", exception);
+        shard.runUnderPrimaryPermit(() ->
+            primary.perform(request, ActionTestUtils.assertNoFailureListener(r -> {
+                final ElasticsearchException exception = new ElasticsearchException("testing");
+                primary.failShard("test", exception);
 
-            verify(shard).failShard("test", exception);
+                verify(shard).failShard("test", exception);
 
-            primary.close();
+                primary.close();
 
-            assertTrue(closed.get());
-        }));
+                assertTrue(closed.get());
+            })), Assert::assertNotNull, null, null);
     }
 
     public void testReplicaProxy() throws InterruptedException, ExecutionException {
@@ -775,10 +777,12 @@ public class TransportReplicationActionTests extends ESTestCase {
                 inSyncIds,
                 shardRoutingTable.getAllAllocationIds()));
         doAnswer(invocation -> {
+            count.incrementAndGet();
             //noinspection unchecked
-            ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(() -> {});
+            ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(count::decrementAndGet);
             return null;
         }).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject());
+        when(shard.getActiveOperationsCount()).thenAnswer(i -> count.get());
 
         final IndexService indexService = mock(IndexService.class);
         when(indexService.getShard(shard.shardId().id())).thenReturn(shard);
@@ -1286,6 +1290,8 @@ public class TransportReplicationActionTests extends ESTestCase {
             return null;
         }).when(indexShard)
             .acquireReplicaOperationPermit(anyLong(), anyLong(), anyLong(), any(ActionListener.class), anyString(), anyObject());
+        when(indexShard.getActiveOperationsCount()).thenAnswer(i -> count.get());
+
         when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> {
             final ClusterState state = clusterService.state();
             final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java
index 8463d66e98e..28373347b19 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java
@@ -316,7 +316,8 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe
                 allPermitsAction.new AsyncPrimaryAction(primaryRequest, allPermitFuture, null) {
                     @Override
                     void runWithPrimaryShardReference(final TransportReplicationAction.PrimaryShardReference reference) {
-                        assertEquals("All permits must be acquired", 0, reference.indexShard.getActiveOperationsCount());
+                        assertEquals("All permits must be acquired",
+                            IndexShard.OPERATIONS_BLOCKED, reference.indexShard.getActiveOperationsCount());
                         assertSame(primary, reference.indexShard);
 
                         final ClusterState clusterState = clusterService.state();
@@ -549,13 +550,13 @@ public class TransportReplicationAllPermitsAcquisitionTests extends IndexShardTe
         @Override
         protected void shardOperationOnPrimary(Request shardRequest, IndexShard shard,
                 ActionListener<PrimaryResult<Request, Response>> listener) {
-            assertEquals("All permits must be acquired", 0, shard.getActiveOperationsCount());
+            assertEquals("All permits must be acquired", IndexShard.OPERATIONS_BLOCKED, shard.getActiveOperationsCount());
             super.shardOperationOnPrimary(shardRequest, shard, listener);
         }
 
         @Override
         protected ReplicaResult shardOperationOnReplica(Request shardRequest, IndexShard shard) throws Exception {
-            assertEquals("All permits must be acquired", 0, shard.getActiveOperationsCount());
+            assertEquals("All permits must be acquired", IndexShard.OPERATIONS_BLOCKED, shard.getActiveOperationsCount());
             return super.shardOperationOnReplica(shardRequest, shard);
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
index c682cbf3c84..5990567c0a0 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -84,6 +84,7 @@ import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.ThreadPoolStats;
+import org.junit.Assert;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -878,7 +879,18 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         shard.refresh("test");
         assertThat(client().search(countRequest).actionGet().getHits().getTotalHits().value, equalTo(numDocs));
         assertThat(shard.getLocalCheckpoint(), equalTo(shard.seqNoStats().getMaxSeqNo()));
-        shard.resetEngineToGlobalCheckpoint();
+
+        final CountDownLatch engineResetLatch = new CountDownLatch(1);
+        shard.acquireAllPrimaryOperationsPermits(ActionListener.wrap(r -> {
+            try {
+                shard.resetEngineToGlobalCheckpoint();
+            } finally {
+                r.close();
+                engineResetLatch.countDown();
+            }
+        }, Assert::assertNotNull), TimeValue.timeValueMinutes(1L));
+        engineResetLatch.await();
+
         final long moreDocs = between(10, 20);
         for (int i = 0; i < moreDocs; i++) {
             client().prepareIndex("test", "_doc", Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get();
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java
index a785c2c4d82..416e7170990 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java
@@ -523,8 +523,8 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
         future2.get().close();
         assertThat(permits.getActiveOperationsCount(), equalTo(0));
 
-        try (Releasable releasable = blockAndWait()) {
-            assertThat(permits.getActiveOperationsCount(), equalTo(0));
+        try (Releasable ignored = blockAndWait()) {
+            assertThat(permits.getActiveOperationsCount(), equalTo(IndexShard.OPERATIONS_BLOCKED));
         }
 
         PlainActionFuture<Releasable> future3 = new PlainActionFuture<>();
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 05a81c6de3c..41b67369647 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -126,6 +126,7 @@ import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.FieldMaskingReader;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Assert;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
@@ -707,7 +708,7 @@ public class IndexShardTests extends IndexShardTestCase {
                     if (singlePermit) {
                         assertThat(indexShard.getActiveOperationsCount(), greaterThan(0));
                     } else {
-                        assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
+                        assertThat(indexShard.getActiveOperationsCount(), equalTo(IndexShard.OPERATIONS_BLOCKED));
                     }
                     releasable.close();
                     super.onResponse(releasable);
@@ -757,7 +758,7 @@ public class IndexShardTests extends IndexShardTestCase {
         indexShard.acquireAllPrimaryOperationsPermits(futureAllPermits, TimeValue.timeValueSeconds(30L));
         allPermitsAcquired.await();
         assertTrue(blocked.get());
-        assertEquals(0, indexShard.getActiveOperationsCount());
+        assertEquals(IndexShard.OPERATIONS_BLOCKED, indexShard.getActiveOperationsCount());
         assertTrue("Expected no results, operations are blocked", results.asList().isEmpty());
         futures.forEach(future -> assertFalse(future.isDone()));
 
@@ -3666,7 +3667,17 @@ public class IndexShardTests extends IndexShardTestCase {
         });
         thread.start();
         latch.await();
-        shard.resetEngineToGlobalCheckpoint();
+
+        final CountDownLatch engineResetLatch = new CountDownLatch(1);
+        shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), globalCheckpoint, 0L, ActionListener.wrap(r -> {
+            try {
+                shard.resetEngineToGlobalCheckpoint();
+            } finally {
+                r.close();
+                engineResetLatch.countDown();
+            }
+        }, Assert::assertNotNull), TimeValue.timeValueMinutes(1L));
+        engineResetLatch.await();
         assertThat(getShardDocUIDs(shard), equalTo(docBelowGlobalCheckpoint));
         assertThat(shard.seqNoStats().getMaxSeqNo(), equalTo(globalCheckpoint));
         assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations()));

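The sentinel above changes the contract of getActiveOperationsCount(): callers can no longer read the return value as a plain count. A minimal, hypothetical caller sketch (mirroring the SyncedFlushService change in this patch; the helper name is illustrative, not part of the patch):

    import org.elasticsearch.index.shard.IndexShard;

    // Hypothetical helper: the blocked state must be handled explicitly, not read as "zero ops in flight".
    class ActiveOpsSketch {
        static int inFlightOpsForReporting(IndexShard shard) {
            final int opCount = shard.getActiveOperationsCount();
            // All permits are held (e.g. relocation hand-off or engine reset): report zero in-flight operations.
            return opCount == IndexShard.OPERATIONS_BLOCKED ? 0 : opCount;
        }
    }
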
From 2523ad03cafc6aaa96d2a0762672730aa7d59aed Mon Sep 17 00:00:00 2001
From: Bob Blank <39975390+BobBlank12@users.noreply.github.com>
Date: Wed, 17 Apr 2019 16:06:03 -0500
Subject: [PATCH 070/112] Fixing missing link on session_token (#41314)

The session_token setting was missing the "(Secure)" link to the secure settings docs.
---
 docs/plugins/repository-s3.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc
index b35b137a438..50674b62654 100644
--- a/docs/plugins/repository-s3.asciidoc
+++ b/docs/plugins/repository-s3.asciidoc
@@ -94,10 +94,10 @@ settings belong in the `elasticsearch.yml` file.
 
     An S3 secret key. The `access_key` setting must also be specified.
 
-`session_token`::
+`session_token` ({ref}/secure-settings.html[Secure])::
 
     An S3 session token. The `access_key` and `secret_key` settings must also be
-    specified. (Secure)
+    specified.
 
 `endpoint`::
 

From c77e10b16be8f91a6547a02777e9b9dfae8bd549 Mon Sep 17 00:00:00 2001
From: Armin Braun <me@obrown.io>
Date: Thu, 18 Apr 2019 07:10:23 +0200
Subject: [PATCH 071/112] Handle Bulk Requests on Write Threadpool (#40866)
 (#41315)

* Bulk requests can contain thousands of items and take well over 10ms to handle, so we should not process them on the transport threadpool, where they would block the network select loops (see the sketch after these notes)
* relates #39128
* relates #39658
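
A minimal sketch of the dispatch pattern this patch applies (hypothetical class and method names; only ThreadPool, BulkRequest, BulkResponse and ActionListener are real Elasticsearch types): the handler forks the bulk work onto the WRITE threadpool instead of running it on the thread that received the message.

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.bulk.BulkResponse;
    import org.elasticsearch.threadpool.ThreadPool;

    // Illustrative only: fork bulk handling off the network thread onto the WRITE pool.
    class BulkDispatchSketch {
        private final ThreadPool threadPool;

        BulkDispatchSketch(ThreadPool threadPool) {
            this.threadPool = threadPool;
        }

        void messageReceived(BulkRequest request, ActionListener<BulkResponse> listener) {
            // Running executeBulk() here would occupy a transport select loop for the whole request.
            threadPool.executor(ThreadPool.Names.WRITE)
                .execute(() -> executeBulk(request, listener));
        }

        private void executeBulk(BulkRequest request, ActionListener<BulkResponse> listener) {
            // ... potentially slow per-item processing, ending with listener.onResponse(...) ...
        }
    }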
---
 .../action/bulk/TransportBulkAction.java             |  5 +++--
 .../action/support/HandledTransportAction.java       | 11 ++++++++---
 .../action/bulk/BulkProcessorRetryIT.java            | 12 +++++++++---
 ...ortBulkActionIndicesThatCannotBeCreatedTests.java |  9 ++++++++-
 .../action/bulk/TransportBulkActionIngestTests.java  | 10 +++++++++-
 5 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
index 9adc92e02be..19ee2efeab6 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -117,7 +117,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
                                TransportShardBulkAction shardBulkAction, NodeClient client,
                                ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
-        super(BulkAction.NAME, transportService, actionFilters, (Supplier<BulkRequest>) BulkRequest::new);
+        super(BulkAction.NAME, transportService, actionFilters, (Supplier<BulkRequest>) BulkRequest::new, ThreadPool.Names.WRITE);
         Objects.requireNonNull(relativeTimeProvider);
         this.threadPool = threadPool;
         this.clusterService = clusterService;
@@ -258,7 +258,8 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
                         @Override
                         public void onResponse(CreateIndexResponse result) {
                             if (counter.decrementAndGet() == 0) {
-                                executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
+                                threadPool.executor(ThreadPool.Names.WRITE).execute(
+                                    () -> executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated));
                             }
                         }
 
diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java
index c0bc0af8399..ca10583ce24 100644
--- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.action.support;
 
-import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -57,6 +56,13 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp
             new TransportHandler());
     }
 
+    protected HandledTransportAction(String actionName, TransportService transportService, ActionFilters actionFilters,
+                                     Supplier<Request> request, String executor) {
+        super(actionName, actionFilters, transportService.getTaskManager());
+        transportService.registerRequestHandler(actionName, request, executor, false, true,
+            new TransportHandler());
+    }
+
     protected HandledTransportAction(String actionName, boolean canTripCircuitBreaker,
                                      TransportService transportService, ActionFilters actionFilters,
                                      Writeable.Reader<Request> requestReader) {
@@ -73,9 +79,8 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp
 
     class TransportHandler implements TransportRequestHandler<Request> {
         @Override
-        public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
+        public final void messageReceived(final Request request, final TransportChannel channel, Task task) {
             // We already got the task created on the network layer - no need to create it again on the transport layer
-            Logger logger = HandledTransportAction.this.logger;
             execute(task, request, new ChannelActionListener<>(channel, actionName, request));
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
index 515c539a884..e4b6fff9fc3 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.transport.RemoteTransportException;
 
 import java.util.Collections;
 import java.util.Iterator;
@@ -133,9 +134,14 @@ public class BulkProcessorRetryIT extends ESIntegTestCase {
                     }
                 }
             } else {
-                Throwable t = (Throwable) response;
-                // we're not expecting any other errors
-                throw new AssertionError("Unexpected failure", t);
+                if (response instanceof RemoteTransportException
+                    && ((RemoteTransportException) response).status() == RestStatus.TOO_MANY_REQUESTS && rejectedExecutionExpected) {
+                    // ignored, we exceeded the write queue size when dispatching the initial bulk request
+                } else {
+                    Throwable t = (Throwable) response;
+                    // we're not expecting any other errors
+                    throw new AssertionError("Unexpected failure", t);
+                }
             }
         }
 
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index 10014c6fb3f..f213b523fbf 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -30,20 +30,24 @@ import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ExecutorService;
 import java.util.function.Function;
 
 import static java.util.Collections.emptySet;
 import static java.util.Collections.singleton;
+import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -102,7 +106,10 @@ public class TransportBulkActionIndicesThatCannotBeCreatedTests extends ESTestCa
         ClusterState state = mock(ClusterState.class);
         when(state.getMetaData()).thenReturn(MetaData.EMPTY_META_DATA);
         when(clusterService.state()).thenReturn(state);
-        TransportBulkAction action = new TransportBulkAction(null, mock(TransportService.class), clusterService,
+        final ThreadPool threadPool = mock(ThreadPool.class);
+        final ExecutorService direct = EsExecutors.newDirectExecutorService();
+        when(threadPool.executor(anyString())).thenReturn(direct);
+        TransportBulkAction action = new TransportBulkAction(threadPool, mock(TransportService.class), clusterService,
                 null, null, null, mock(ActionFilters.class), null, null) {
             @Override
             void executeBulk(Task task, BulkRequest bulkRequest, long startTimeNanos, ActionListener<BulkResponse> listener,
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
index b3ecc590767..b570ec8f781 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
@@ -45,11 +45,13 @@ import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 import org.junit.Before;
@@ -61,6 +63,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
@@ -68,6 +71,7 @@ import java.util.function.Consumer;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.sameInstance;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -92,6 +96,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
     TransportService transportService;
     ClusterService clusterService;
     IngestService ingestService;
+    ThreadPool threadPool;
 
     /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */
     @Captor
@@ -126,7 +131,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {
         boolean indexCreated = true; // If set to false, will be set to true by call to createIndex
 
         TestTransportBulkAction() {
-            super(null, transportService, clusterService, ingestService,
+            super(threadPool, transportService, clusterService, ingestService,
                 null, null, new ActionFilters(Collections.emptySet()), null,
                 new AutoCreateIndex(
                     SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
@@ -163,6 +168,9 @@ public class TransportBulkActionIngestTests extends ESTestCase {
     @Before
     public void setupAction() {
         // initialize captors, which must be members to use @Capture because of generics
+        threadPool = mock(ThreadPool.class);
+        final ExecutorService direct = EsExecutors.newDirectExecutorService();
+        when(threadPool.executor(anyString())).thenReturn(direct);
         MockitoAnnotations.initMocks(this);
         // setup services that will be called by action
         transportService = mock(TransportService.class);

From 44a0c468cf7da451ee572ccdfb3913283efbe16f Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Thu, 18 Apr 2019 08:51:10 +0300
Subject: [PATCH 072/112] Clean up clusters between tests (#41187)

This PR adds additional cleanup when stopping the node.
The data dir is excepted because it gets reused in some tests.
Without this cleanup the number of working dir copies could grow until it
exhausts all available disk space.
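
The Gradle change below expresses this as fileTree(baseDir) minus fileTree(dataDir). A rough plain-Java sketch of the same intent, assuming the data dir lives inside the node's working dir (names and helper are illustrative, not part of the build code):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;
    import java.util.stream.Stream;

    // Illustrative cleanup: delete the node's working dir contents but keep the data dir.
    class ClusterCleanupSketch {
        static void cleanUp(Path baseDir, Path dataDir) throws IOException {
            try (Stream<Path> paths = Files.walk(baseDir)) {
                paths.sorted(Comparator.reverseOrder())           // delete children before their parents
                    .filter(p -> p.equals(baseDir) == false)      // keep the working dir itself
                    .filter(p -> p.startsWith(dataDir) == false)  // keep the data dir, some tests reuse it
                    .forEach(p -> p.toFile().delete());           // ancestors of the data dir stay non-empty and survive
            }
        }
    }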
---
 .../org/elasticsearch/gradle/test/ClusterFormationTasks.groovy  | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index 824cb161a63..4b1e5596fe8 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -964,6 +964,8 @@ class ClusterFormationTasks {
             }
             doLast {
                 project.delete(node.pidFile)
+                // Large tests can exhaust disk space, so clean up on stop, but leave the data dir as some tests reuse it
+                project.delete(project.fileTree(node.baseDir).minus(project.fileTree(node.dataDir)))
             }
         }
     }

From a4a4259cac97ffbfc54907a5601d74105b3e90a8 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Thu, 18 Apr 2019 09:25:30 +0300
Subject: [PATCH 073/112] Mute failing test

Tracking #41326
---
 .../org/elasticsearch/snapshots/SnapshotResiliencyTests.java    | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
index 9c1d256b552..12be86a4115 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
@@ -286,6 +286,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
         assertThat(snapshotIds, hasSize(1));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41326")
     public void testConcurrentSnapshotCreateAndDelete() {
         setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
 
@@ -335,6 +336,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
      * Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently
      * deleting a snapshot.
      */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41326")
     public void testSnapshotPrimaryRelocations() {
         final int masterNodeCount = randomFrom(1, 3, 5);
         setupTestCluster(masterNodeCount, randomIntBetween(2, 10));

From 45c151ff8bd1428db8f52455f5cad43a03a75346 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Thu, 18 Apr 2019 09:50:49 +0300
Subject: [PATCH 074/112] Disable composePull only if it exists (#41306)

The task will not be created when docker is not available.
---
 distribution/docker/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle
index 71fc62673dc..00fceb9df6a 100644
--- a/distribution/docker/build.gradle
+++ b/distribution/docker/build.gradle
@@ -142,4 +142,4 @@ assemble.dependsOn "buildDockerImage"
 // We build the images used in compose locally, but the pull command insists on using a repository
 // thus we must disable it to prevent it from doing so. 
 // Everything will still be pulled since we will build the local images on a pull
-composePull.enabled = false
+tasks.matching { name == "composePull" }.all { enabled = false }

From 343039e20053aeb42a26438078ae2dc6472ef9c3 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Thu, 18 Apr 2019 11:18:25 +0200
Subject: [PATCH 075/112] Disable
 CcrRetentionLeaseIT#testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes.

Relates #39331.
---
 .../java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java    | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
index bfa142a64e7..6f97a6ba586 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
@@ -266,6 +266,7 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase {
 
     }
 
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/39331")
     public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception {
         final String leaderIndex = "leader";
         final int numberOfShards = randomIntBetween(1, 3);

From 86e56590a7d6b0f6551b18d6c950f63dc135d677 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Thu, 18 Apr 2019 11:31:00 +0200
Subject: [PATCH 076/112] Revert "Disable
 CcrRetentionLeaseIT#testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes."

This reverts commit 343039e20053aeb42a26438078ae2dc6472ef9c3.
---
 .../java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java    | 1 -
 1 file changed, 1 deletion(-)

diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
index 6f97a6ba586..bfa142a64e7 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java
@@ -266,7 +266,6 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase {
 
     }
 
-    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/39331")
     public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception {
         final String leaderIndex = "leader";
         final int numberOfShards = randomIntBetween(1, 3);

From 389a13b68e3b696ecde4ce25df76d4eaa00db898 Mon Sep 17 00:00:00 2001
From: Armin Braun <me@obrown.io>
Date: Thu, 18 Apr 2019 11:55:28 +0200
Subject: [PATCH 077/112] Mute
 BulkProcessorRetryIT#testBulkRejectionLoadWithBackoff (#41325) (#41331)

* For #41324
---
 .../test/java/org/elasticsearch/client/BulkProcessorRetryIT.java | 1 +
 .../java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java | 1 +
 2 files changed, 2 insertions(+)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java
index e3b280225cb..19e2de22e54 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java
@@ -56,6 +56,7 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase {
         executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected);
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41324")
     public void testBulkRejectionLoadWithBackoff() throws Throwable {
         boolean rejectedExecutionExpected = false;
         executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected);
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
index e4b6fff9fc3..054e213d5ab 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
@@ -64,6 +64,7 @@ public class BulkProcessorRetryIT extends ESIntegTestCase {
         executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected);
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41324")
     public void testBulkRejectionLoadWithBackoff() throws Throwable {
         boolean rejectedExecutionExpected = false;
         executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected);

From a699cb76a5cde8b9e1252605d3f3c98a4304e38c Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Thu, 18 Apr 2019 14:41:09 +0200
Subject: [PATCH 078/112] Fix javadoc tag. (#41330)

s/returns/return/
---
 .../src/main/java/org/elasticsearch/indices/IndicesService.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
index e07a83e2031..adcc70d741b 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -321,7 +321,7 @@ public class IndicesService extends AbstractLifecycleComponent
      * are closed and all shard {@link CacheHelper#addClosedListener(org.apache.lucene.index.IndexReader.ClosedListener) closed
      * listeners} have run. However some {@link IndexEventListener#onStoreClosed(ShardId) shard closed listeners} might not have
      * run.
-     * @returns true if all shards closed within the given timeout, false otherwise
+     * @return true if all shards closed within the given timeout, false otherwise
      * @throws InterruptedException if the current thread got interrupted while waiting for shards to close
      */
     public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException {

From 0a343be90c29521539bb146b044d606177989e44 Mon Sep 17 00:00:00 2001
From: Nik Everett <nik9000@gmail.com>
Date: Thu, 18 Apr 2019 09:17:22 -0400
Subject: [PATCH 079/112] Docs: Fix deprecation warning in Asciidoctor

Fix a deprecation warning that wasn't rendering correctly in
Asciidoctor. This one needed to be explicitly marked as an inline macro
because it is on its own line, and its text needed to be escaped
because it contained a `,`. It was also missing explanatory text for
what the setting does.
---
 docs/plugins/repository-gcs.asciidoc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc
index 436f9cc9aae..d0e603def50 100644
--- a/docs/plugins/repository-gcs.asciidoc
+++ b/docs/plugins/repository-gcs.asciidoc
@@ -88,9 +88,8 @@ A JSON service account file looks like this:
 ----
 // NOTCONSOLE
 
-To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore].  You must
-add a `file` setting with the name `gcs.client.NAME.credentials_file` using the `add-file` subcommand.
- `NAME` is the name of the client configuration for the repository. The implicit client
+To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore].  You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME`
+is the name of the client configuration for the repository. The implicit client
 name is `default`, but a different client name can be specified in the
 repository settings with the `client` key. 
 
@@ -245,7 +244,8 @@ include::repository-shared-settings.asciidoc[]
 
 `application_name`::
 
-    deprecated[7.0.0, This setting is now defined in the <<repository-gcs-client, client settings>>]
+    deprecated:[6.3.0, "This setting is now defined in the <<repository-gcs-client, client settings>>."]
+    Name used by the client when it uses the Google Cloud Storage service.
 
 [[repository-gcs-bucket-permission]]
 ===== Recommended Bucket Permission

From 6b4cf8f0bdcacf2c39286c4ab3e39628f61f983e Mon Sep 17 00:00:00 2001
From: Nik Everett <nik9000@gmail.com>
Date: Thu, 18 Apr 2019 09:21:13 -0400
Subject: [PATCH 080/112] Docs: Revert accidental gcs docs change

I snuck an extra change in my last commit accidentally. This reverts it.
---
 docs/plugins/repository-gcs.asciidoc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc
index d0e603def50..536df84d16c 100644
--- a/docs/plugins/repository-gcs.asciidoc
+++ b/docs/plugins/repository-gcs.asciidoc
@@ -88,8 +88,9 @@ A JSON service account file looks like this:
 ----
 // NOTCONSOLE
 
-To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore].  You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME`
-is the name of the client configuration for the repository. The implicit client
+To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore].  You must
+add a `file` setting with the name `gcs.client.NAME.credentials_file` using the `add-file` subcommand.
+ `NAME` is the name of the client configuration for the repository. The implicit client
 name is `default`, but a different client name can be specified in the
 repository settings with the `client` key. 
 

From 11dc9fe249dd609ec986d5d887a67926c3b5d244 Mon Sep 17 00:00:00 2001
From: Simon Willnauer <simonw@apache.org>
Date: Thu, 18 Apr 2019 19:14:23 +0200
Subject: [PATCH 081/112] Mark searcher as accessed in acquireSearcher (#41335)

This fixes an issue where a slow search request is triggered every N seconds
because the searcher access time is only updated when the shard is already search idle.
This change takes a more proactive approach and marks the searcher as accessed on
every acquisition (see the sketch below).
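
A rough sketch of the bookkeeping involved, with simplified names and plain millisecond timestamps rather than the actual IndexShard fields:

    // Simplified model of search-idle tracking; not the real IndexShard implementation.
    class SearchIdleSketch {
        private final long searchIdleAfterMillis;
        private volatile long lastSearcherAccessMillis;

        SearchIdleSketch(long searchIdleAfterMillis) {
            this.searchIdleAfterMillis = searchIdleAfterMillis;
            this.lastSearcherAccessMillis = System.currentTimeMillis();
        }

        boolean isSearchIdle() {
            return System.currentTimeMillis() - lastSearcherAccessMillis >= searchIdleAfterMillis;
        }

        void acquireSearcher() {
            // Before the fix the timestamp was only refreshed once the shard was already idle,
            // so a steadily searched shard could still drift into the idle state and trigger
            // the slow wake-up path; refreshing it on every acquire keeps it out of that state.
            lastSearcherAccessMillis = System.currentTimeMillis();
        }
    }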
---
 .../main/java/org/elasticsearch/index/shard/IndexShard.java | 5 ++---
 .../java/org/elasticsearch/index/shard/IndexShardTests.java | 6 ++++++
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index d1b5a25db6d..5c75b47d655 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -1215,6 +1215,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
 
     private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
         readAllowed();
+        markSearcherAccessed();
         final Engine engine = getEngine();
         final Engine.Searcher searcher = engine.acquireSearcher(source, scope);
         assert ElasticsearchDirectoryReader.unwrap(searcher.getDirectoryReader())
@@ -2990,9 +2991,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
      *                 <code>true</code> if the listener was registered to wait for a refresh.
      */
     public final void awaitShardSearchActive(Consumer<Boolean> listener) {
-        if (isSearchIdle()) {
-            markSearcherAccessed(); // move the shard into non-search idle
-        }
+        markSearcherAccessed(); // move the shard into non-search idle
         final Translog.Location location = pendingRefreshLocation.get();
         if (location != null) {
             addRefreshListener(location, (b) -> {
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 41b67369647..0ac168519c6 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -3240,6 +3240,12 @@ public class IndexShardTests extends IndexShardTestCase {
             // now loop until we are fast enough... shouldn't take long
             primary.awaitShardSearchActive(aBoolean -> {});
         } while (primary.isSearchIdle());
+
+        assertBusy(() -> assertTrue(primary.isSearchIdle()));
+        do {
+            // now loop until we are fast enough... shouldn't take long
+            primary.acquireSearcher("test").close();
+        } while (primary.isSearchIdle());
         closeShards(primary);
     }
 

From 0227ac5690fb78341b521056d5fac1bb3f129812 Mon Sep 17 00:00:00 2001
From: Mark Vieira <portugee@gmail.com>
Date: Thu, 18 Apr 2019 11:15:34 -0700
Subject: [PATCH 082/112] Fix issue with subproject test task dependencies
 (#41321) (#41351)

---
 .../elasticsearch/gradle/precommit/TestingConventionsTasks.java | 2 +-
 x-pack/plugin/sql/qa/security/build.gradle                      | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java
index b2228f5c1b1..95ad323ceda 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java
@@ -65,7 +65,7 @@ public class TestingConventionsTasks extends DefaultTask {
     public TestingConventionsTasks() {
         setDescription("Tests various testing conventions");
         // Run only after everything is compiled
-        Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getClassesTaskName()));
+        Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs()));
         naming = getProject().container(TestingConventionRule.class);
     }
 
diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle
index 525acaf99d6..a0e6e82ed4d 100644
--- a/x-pack/plugin/sql/qa/security/build.gradle
+++ b/x-pack/plugin/sql/qa/security/build.gradle
@@ -15,6 +15,7 @@ subprojects {
     test {
       mainProject.sourceSets.test.output.classesDirs.each { dir ->
         output.addClassesDir { dir }
+        output.builtBy(mainProject.tasks.testClasses)
       }
       runtimeClasspath += mainProject.sourceSets.test.output
     }

From cfed5d65bee8d16ee7822cf6ec593dac18b26c43 Mon Sep 17 00:00:00 2001
From: Andrei Stefan <astefan@users.noreply.github.com>
Date: Thu, 18 Apr 2019 20:09:59 +0300
Subject: [PATCH 083/112] SQL: fix *SecurityIT tests by covering edge case
 scenarios when audit file rolls over at midnight (#41328)

* Handle the scenario where assertLogs() is not called from a test method
but the rolling audit log file still rolls over.
* Use a local boolean variable instead of the static one, because the
assertBusy() code block can be executed multiple times and take
different execution paths (see the sketch below the commit notes).

(cherry picked from commit 6f642196cbab90079c610097befc794746170df1)
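
A minimal sketch of the second point, with hypothetical names (assertBusyLike stands in for ESTestCase.assertBusy(), which retries its block): snapshotting the shared static flag into a local before the retried block keeps every retry on the same execution path, even if an earlier attempt already flipped the flag.

    // Hypothetical illustration: read shared state once, outside the retried block.
    class RetryFlagSketch {
        private static volatile boolean auditFileRolledOver = false;

        void checkLogs() throws Exception {
            final boolean localRolledOver = auditFileRolledOver;   // snapshot before retrying
            assertBusyLike(() -> {
                if (localRolledOver == false /* && the rolled-over file exists */) {
                    // switch to reading the rolled-over file, then remember it for later tests
                    auditFileRolledOver = true;
                }
                // ... read the audit log and assert on its contents ...
            });
        }

        // Stand-in for ESTestCase.assertBusy(): re-runs the block until it stops throwing.
        private static void assertBusyLike(Runnable block) throws Exception {
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    block.run();
                    return;
                } catch (AssertionError e) {
                    Thread.sleep(10);
                }
            }
            block.run();
        }
    }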
---
 .../sql/qa/security/SqlSecurityTestCase.java      | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java
index f4aadbdf7cd..313d0cdb5cf 100644
--- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java
+++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java
@@ -188,6 +188,16 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
             } catch (IOException e) {
                 throw new RuntimeException(e);
             }
+            
+            // The log file can roll over without being caught by the assertLogs() method: in tests where exceptions are handled
+            // and no audit logs are read (and, thus, assertLogs() is not called) - for example the testNoMonitorMain() method - there
+            // are no calls to assertLogs(), and the method could run while the audit file is rolled over.
+            // If this happens, the next call to assertLogs() will make the tests read from the rolled-over file using the main audit
+            // file offset, which will most likely not work since the offset will fall somewhere in the middle of a json line.
+            if (auditFileRolledOver == false && Files.exists(ROLLED_OVER_AUDIT_LOG_FILE)) {
+                // once the audit file rolled over, it will stay like this
+                auditFileRolledOver = true;
+            }
             return null;
         });
     }
@@ -568,6 +578,9 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
             assertFalse("Previous test had an audit-related failure. All subsequent audit related assertions are bogus because we can't "
                     + "guarantee that we fully cleaned up after the last test.", auditFailure);
             try {
+                // use a second variable since the `assertBusy()` block can be executed multiple times and the
+                // static auditFileRolledOver value can change and mess up subsequent calls of this code block
+                boolean localAuditFileRolledOver = auditFileRolledOver;
                 assertBusy(() -> {
                     SecurityManager sm = System.getSecurityManager();
                     if (sm != null) {
@@ -579,7 +592,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
                         try {
                             // the audit log file rolled over during the test
                             // and we need to consume the rest of the rolled over file plus the new audit log file
-                            if (auditFileRolledOver == false && Files.exists(ROLLED_OVER_AUDIT_LOG_FILE)) {
+                            if (localAuditFileRolledOver == false && Files.exists(ROLLED_OVER_AUDIT_LOG_FILE)) {
                                 // once the audit file rolled over, it will stay like this
                                 auditFileRolledOver = true;
                                 // the order in the array matters, as the readers will be used in that order

From 5f497d427496b90b1dbd9263aeb1093158c62cfe Mon Sep 17 00:00:00 2001
From: Costin Leau <costin@users.noreply.github.com>
Date: Thu, 18 Apr 2019 22:44:16 +0300
Subject: [PATCH 084/112] SQL: Predicate diff takes into account all values
 (#41346)

Fix a bug in predicate subtraction that made the decision on the first
mismatch instead of evaluating the whole list (see the worked sketch after
the commit notes). In some cases this produced not only an incorrect result
but one that kept growing, causing the engine to bail out.

Fix #40835

(cherry picked from commit bd2b33d6eaca616a5acd846204e2d12f905854d4)
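
A small, self-contained sketch of the fixed subtraction logic using plain strings instead of SQL Expression nodes (equals() stands in for semanticEquals()). With from = [a, b] and list = [b, c], the early-exit version would both duplicate "a" and wrongly keep "b", while scanning the whole list before deciding yields just [a].

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Illustration of "subtract" over plain strings; the real code compares Expression nodes.
    class SubtractSketch {
        static List<String> subtract(List<String> from, List<String> list) {
            List<String> diff = new ArrayList<>();
            for (String f : from) {
                boolean found = false;
                for (String l : list) {
                    if (f.equals(l)) {          // stand-in for semanticEquals()
                        found = true;
                        break;
                    }
                }
                if (found == false) {
                    diff.add(f);                // decide only after scanning the whole list
                }
            }
            return diff;
        }

        public static void main(String[] args) {
            System.out.println(subtract(Arrays.asList("a", "b"), Arrays.asList("b", "c"))); // prints [a]
        }
    }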
---
 .../sql/capabilities/UnresolvedException.java |  6 +----
 .../sql/expression/predicate/Predicates.java  | 17 +++++++-----
 .../xpack/sql/optimizer/Optimizer.java        |  2 +-
 .../xpack/sql/rule/RuleExecutor.java          |  4 +--
 .../xpack/sql/optimizer/OptimizerTests.java   | 26 ++++++++++++++++---
 5 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java
index d6c7543f6af..0db87c6b944 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java
@@ -7,16 +7,12 @@ package org.elasticsearch.xpack.sql.capabilities;
 
 import org.elasticsearch.xpack.sql.ServerSqlException;
 
-import java.util.Locale;
-
-import static java.lang.String.format;
-
 /**
  * Thrown when we accidentally attempt to resolve something on on an unresolved entity. Throwing this
  * is always a bug.
  */
 public class UnresolvedException extends ServerSqlException {
     public UnresolvedException(String action, Object target) {
-        super(format(Locale.ROOT, "Invalid call to %s on an unresolved object %s", action, target));
+        super("Invalid call to {} on an unresolved object {}", action, target);
     }
 }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java
index c280d974e88..83a4b96f829 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java
@@ -97,14 +97,19 @@ public abstract class Predicates {
         return common.isEmpty() ? emptyList() : common;
     }
 
-    public static List<Expression> subtract(List<Expression> from, List<Expression> r) {
-        List<Expression> diff = new ArrayList<>(Math.min(from.size(), r.size()));
-        for (Expression lExp : from) {
-            for (Expression rExp : r) {
-                if (!lExp.semanticEquals(rExp)) {
-                    diff.add(lExp);
+    public static List<Expression> subtract(List<Expression> from, List<Expression> list) {
+        List<Expression> diff = new ArrayList<>(Math.min(from.size(), list.size()));
+        for (Expression f : from) {
+            boolean found = false;
+            for (Expression l : list) {
+                if (f.semanticEquals(l)) {
+                    found = true;
+                    break;
                 }
             }
+            if (found == false) {
+                diff.add(f);
+            }
         }
         return diff.isEmpty() ? emptyList() : diff;
     }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
index d6e4c4fe07d..eafdf21b119 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
@@ -1236,7 +1236,7 @@ public class Optimizer extends RuleExecutor<LogicalPlan> {
 
         @Override
         protected Expression rule(Expression e) {
-            if (e instanceof BinaryPredicate) {
+            if (e instanceof And || e instanceof Or) {
                 return simplifyAndOr((BinaryPredicate<?, ?, ?, ?>) e);
             }
             if (e instanceof Not) {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java
index 2ed68def135..689b0b338a9 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java
@@ -38,7 +38,7 @@ public abstract class RuleExecutor<TreeType extends Node<TreeType>> {
 
         boolean reached(int runs) {
             if (runs >= this.runs) {
-                throw new RuleExecutionException("Rule execution limit %d reached", runs);
+                throw new RuleExecutionException("Rule execution limit [{}] reached", runs);
             }
             return false;
         }
@@ -139,7 +139,7 @@ public abstract class RuleExecutor<TreeType extends Node<TreeType>> {
 
         for (Batch batch : batches) {
             int batchRuns = 0;
-            List<Transformation> tfs = new ArrayList<Transformation>();
+            List<Transformation> tfs = new ArrayList<>();
             transformations.put(batch, tfs);
 
             boolean hasChanged = false;
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
index a23d88b5956..c95206c29e9 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
@@ -181,9 +181,12 @@ public class OptimizerTests extends ESTestCase {
     }
 
     private static FieldAttribute getFieldAttribute() {
-        return new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
+        return getFieldAttribute("a");
     }
 
+    private static FieldAttribute getFieldAttribute(String name) {
+        return new FieldAttribute(EMPTY, name, new EsField(name + "f", DataType.INTEGER, emptyMap(), true));
+    }
 
     public void testPruneSubqueryAliases() {
         ShowTables s = new ShowTables(EMPTY, null, null);
@@ -1145,6 +1148,23 @@ public class OptimizerTests extends ESTestCase {
         assertEquals(or, exp);
     }
 
+    // (a = 1 AND b = 3 AND c = 4) OR (a = 2 AND b = 3 AND c = 4) -> (b = 3 AND c = 4) AND (a = 1 OR a = 2)
+    public void testBooleanSimplificationCommonExpressionSubstraction() {
+        FieldAttribute fa = getFieldAttribute("a");
+        FieldAttribute fb = getFieldAttribute("b");
+        FieldAttribute fc = getFieldAttribute("c");
+
+        Expression a1 = new Equals(EMPTY, fa, ONE);
+        Expression a2 = new Equals(EMPTY, fa, TWO);
+        And common = new And(EMPTY, new Equals(EMPTY, fb, THREE), new Equals(EMPTY, fc, FOUR));
+        And left = new And(EMPTY, a1, common);
+        And right = new And(EMPTY, a2, common);
+        Or or = new Or(EMPTY, left, right);
+
+        Expression exp = new BooleanSimplification().rule(or);
+        assertEquals(new And(EMPTY, common, new Or(EMPTY, a1, a2)), exp);
+    }
+
     // (0 < a <= 1) OR (0 < a < 2) -> 0 < a < 2
     public void testRangesOverlappingNoLowerBoundary() {
         FieldAttribute fa = getFieldAttribute();
@@ -1289,7 +1309,7 @@ public class OptimizerTests extends ESTestCase {
         Order firstOrderBy = new Order(EMPTY, firstField, OrderDirection.ASC, Order.NullsPosition.LAST);
         Order secondOrderBy = new Order(EMPTY, secondField, OrderDirection.ASC, Order.NullsPosition.LAST);
         
-        OrderBy orderByPlan = new OrderBy(EMPTY, 
+        OrderBy orderByPlan = new OrderBy(EMPTY,
                 new Aggregate(EMPTY, FROM(), Arrays.asList(secondField, firstField), Arrays.asList(secondAlias, firstAlias)),
                 Arrays.asList(firstOrderBy, secondOrderBy));
         LogicalPlan result = new Optimizer.SortAggregateOnOrderBy().apply(orderByPlan);
@@ -1321,7 +1341,7 @@ public class OptimizerTests extends ESTestCase {
         Order firstOrderBy = new Order(EMPTY, firstAlias, OrderDirection.ASC, Order.NullsPosition.LAST);
         Order secondOrderBy = new Order(EMPTY, secondAlias, OrderDirection.ASC, Order.NullsPosition.LAST);
         
-        OrderBy orderByPlan = new OrderBy(EMPTY, 
+        OrderBy orderByPlan = new OrderBy(EMPTY,
                 new Aggregate(EMPTY, FROM(), Arrays.asList(secondAlias, firstAlias), Arrays.asList(secondAlias, firstAlias)),
                 Arrays.asList(firstOrderBy, secondOrderBy));
         LogicalPlan result = new Optimizer.SortAggregateOnOrderBy().apply(orderByPlan);

From 1f5cd410b821e426c144f71ea970b333ee150163 Mon Sep 17 00:00:00 2001
From: Jay Modi <jaymode@users.noreply.github.com>
Date: Thu, 18 Apr 2019 14:17:16 -0600
Subject: [PATCH 085/112] Clean up docs regarding recommended JVM (#41356)

This change clarifies the documentation around the recommended JVM. The
recommended JVM is the bundled JVM. If a user does not use our
recommended JVM, we suggest that they use a supported LTS version of the
JVM.

Closes #41132
---
 docs/reference/setup.asciidoc | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc
index d2d46670809..8fd5a44443d 100644
--- a/docs/reference/setup.asciidoc
+++ b/docs/reference/setup.asciidoc
@@ -25,16 +25,15 @@ platforms, but it is possible that it will work on other platforms too.
 
 Elasticsearch is built using Java, and includes a bundled version of
 http://openjdk.java.net[OpenJDK] from the JDK maintainers (GPLv2+CE)
-within each distribution. The bundled JVM exists within the `jdk` directory of
-the Elasticsearch home directory.
+within each distribution. The bundled JVM is the recommended JVM and
+is located within the `jdk` directory of the Elasticsearch home directory.
 
 To use your own version of Java, set the `JAVA_HOME` environment variable.
-When using your own version, the bundled JVM directory may be removed.
-If not using the bundled JVM, we recommend installing Java version
- *{jdk} or a later version in the Java {jdk_major} release series*. We recommend
-using a link:/support/matrix[supported]
+If you must use a version of Java that is different from the bundled JVM,
+we recommend using a link:/support/matrix[supported]
 http://www.oracle.com/technetwork/java/eol-135779.html[LTS version of Java].
 Elasticsearch will refuse to start if a known-bad version of Java is used.
+The bundled JVM directory may be removed when using your own JVM.
 
 --
 

From 068f8ba223376d7c1a2e30dec953b6a750ed9006 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi <jim.ferenczi@elastic.co>
Date: Thu, 18 Apr 2019 22:29:36 +0200
Subject: [PATCH 086/112] more_like_this query to throw an error if the like
 fields are not provided (#40632)

With the removal of the `_all` field, the `mlt` query cannot infer a field name
to use to analyze the provided (un)like text if the `fields` parameter is not
explicitly set in the query and `index.query.default_field` has not been changed
in the index settings (by default it is set to `*`). For this reason the like text
is ignored and queries are only built from the provided document ids.
This change fixes this bug by throwing an error if the `fields` option is not set
and `index.query.default_field` is equal to `*`. The error is thrown only
if like or unlike texts are provided in the query.
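
As an illustration, here is a minimal Java sketch of the added guard; the helper
name and standalone shape are mine, while the condition and error message mirror
the hunk below.

import java.util.List;

class MltDefaultFieldGuard {
    // Fail fast when the query falls back to index.query.default_field, that default
    // is still the wildcard "*", and there is free text that needs a concrete field.
    static void checkDefaultField(List<String> defaultFields, String[] likeTexts, String[] unlikeTexts) {
        boolean wildcardDefault = defaultFields.size() == 1 && defaultFields.get(0).equals("*");
        boolean hasFreeText = likeTexts.length > 0 || unlikeTexts.length > 0;
        if (wildcardDefault && hasFreeText) {
            throw new IllegalArgumentException("[more_like_this] query cannot infer the field to analyze the free text, "
                + "you should update the [index.query.default_field] index setting to a field that exists in the mapping or "
                + "set the [fields] option in the query.");
        }
    }
}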
---
 .../index/query/MoreLikeThisQueryBuilder.java |  7 +++
 .../query/MoreLikeThisQueryBuilderTests.java  | 48 +++++++++++++++++--
 .../search/morelikethis/MoreLikeThisIT.java   |  8 ++--
 3 files changed, 56 insertions(+), 7 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
index b90a1e60ffa..11530ce5f30 100644
--- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
@@ -1050,6 +1050,13 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
         List<String> moreLikeFields = new ArrayList<>();
         if (useDefaultField) {
             moreLikeFields = context.defaultFields();
+            if (moreLikeFields.size() == 1
+                    && moreLikeFields.get(0).equals("*")
+                    && (likeTexts.length > 0 || unlikeTexts.length > 0)) {
+                throw new IllegalArgumentException("[more_like_this] query cannot infer the field to analyze the free text, " +
+                    "you should update the [index.query.default_field] index setting to a field that exists in the mapping or " +
+                    "set the [fields] option in the query.");
+            }
         } else {
             for (String field : fields) {
                 MappedFieldType fieldType = context.fieldMapper(field);
diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
index 62613139b50..56e7a323472 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
@@ -31,10 +31,12 @@ import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
 import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
 import org.elasticsearch.action.termvectors.TermVectorsRequest;
 import org.elasticsearch.action.termvectors.TermVectorsResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -160,13 +162,13 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
         } else {
             likeItems = randomLikeItems;
         }
-        if (randomBoolean()) { // for the default field
-            queryBuilder = new MoreLikeThisQueryBuilder(likeTexts, likeItems);
+        if (randomBoolean() && likeItems != null && likeItems.length > 0) { // for the default field
+            queryBuilder = new MoreLikeThisQueryBuilder(null, likeItems);
         } else {
             queryBuilder = new MoreLikeThisQueryBuilder(randomFields, likeTexts, likeItems);
         }
 
-        if (randomBoolean()) {
+        if (randomBoolean() && queryBuilder.fields() != null) {
             queryBuilder.unlike(generateRandomStringArray(5, 5, false, false));
         }
         if (randomBoolean()) {
@@ -305,6 +307,39 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
         assertThat(e.getMessage(), containsString("more_like_this only supports text/keyword fields"));
     }
 
+    public void testDefaultField() throws IOException {
+        QueryShardContext context = createShardContext();
+
+        {
+            MoreLikeThisQueryBuilder builder =
+                new MoreLikeThisQueryBuilder(new String[]{"hello world"}, null);
+            IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+                () -> builder.toQuery(context));
+            assertThat(e.getMessage(), containsString("[more_like_this] query cannot infer"));
+        }
+
+        {
+            context.getIndexSettings().updateIndexMetaData(
+                newIndexMeta("index",
+                    context.getIndexSettings().getSettings(),
+                    Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME).build()
+                )
+            );
+            try {
+                MoreLikeThisQueryBuilder builder = new MoreLikeThisQueryBuilder(new String[]{"hello world"}, null);
+                builder.toQuery(context);
+            } finally {
+                // Reset the default value
+                context.getIndexSettings().updateIndexMetaData(
+                    newIndexMeta("index",
+                        context.getIndexSettings().getSettings(),
+                        Settings.builder().putList("index.query.default_field", "*").build()
+                    )
+                );
+            }
+        }
+    }
+
     public void testMoreLikeThisBuilder() throws Exception {
         Query parsedQuery =
             parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null)
@@ -390,4 +425,11 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
         }
         return query;
     }
+
+    private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) {
+        Settings build = Settings.builder().put(oldIndexSettings)
+            .put(indexSettings)
+            .build();
+        return IndexMetaData.builder(name).settings(build).build();
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
index 2e29c7c5a38..4492353f6f1 100644
--- a/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
+++ b/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
@@ -343,10 +343,10 @@ public class MoreLikeThisIT extends ESIntegTestCase {
                 new MoreLikeThisQueryBuilder(new String[] {"string_value", "int_value"}, null,
                         new Item[] {new Item("test", "1")}).minTermFreq(1).minDocFreq(1)), SearchPhaseExecutionException.class);
 
-        // mlt query with no field -> No results (because _all is not enabled)
-        searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery(new String[] {"index"}).minTermFreq(1).minDocFreq(1))
-                .get();
-        assertHitCount(searchResponse, 0L);
+        // mlt query with no field -> exception because _all is not enabled)
+        assertThrows(client().prepareSearch()
+            .setQuery(moreLikeThisQuery(new String[] {"index"}).minTermFreq(1).minDocFreq(1)),
+            SearchPhaseExecutionException.class);
 
         // mlt query with string fields
         searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery(new String[]{"string_value"}, new String[] {"index"}, null)

From 754037b71e9d783bf4d2d5cdea5538b2a17bb348 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi <jim.ferenczi@elastic.co>
Date: Thu, 18 Apr 2019 22:30:51 +0200
Subject: [PATCH 087/112] Unified highlighter should ignore terms that target
 the _id field (#41275)

The `_id` field uses a binary encoding to index terms that is not compatible with
the utf8 automaton that the unified highlighter creates to reanalyze the input.
For this reason this commit ignores terms that target the `_id` field when
`require_field_match` is set to false.

Closes #37525
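
A minimal sketch of the matcher change, assuming only java.util.function.Predicate;
the class and method names are illustrative (the real code passes the lambda straight
to the Lucene highlighter via setFieldMatcher).

import java.util.function.Predicate;

class LenientFieldMatcherSketch {
    static final String ID_FIELD = "_id"; // IdFieldMapper.NAME in the actual change

    // With require_field_match=false every field is highlightable except _id, whose
    // binary-encoded terms cannot be re-analyzed by the highlighter's UTF-8 automaton.
    static Predicate<String> lenientFieldMatcher() {
        return name -> ID_FIELD.equals(name) == false;
    }
}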
---
 .../highlight/UnifiedHighlighter.java         |  5 +++-
 .../highlight/HighlighterSearchIT.java        | 23 +++++++++++++++++++
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
index 2d570d2b7c7..2a75e9c58f4 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
@@ -35,6 +35,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
 import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -109,7 +110,9 @@ public class UnifiedHighlighter implements Highlighter {
                 final String fieldName = highlighterContext.fieldName;
                 highlighter.setFieldMatcher((name) -> fieldName.equals(name));
             } else {
-                highlighter.setFieldMatcher((name) -> true);
+                // ignore terms that targets the _id field since they use a different encoding
+                // that is not compatible with utf8
+                highlighter.setFieldMatcher(name -> IdFieldMapper.NAME.equals(name) == false);
             }
 
             Snippet[] fieldSnippets = highlighter.highlightField(highlighterContext.fieldName,
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index d1a66969531..1467fd1f097 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -2947,6 +2947,29 @@ public class HighlighterSearchIT extends ESIntegTestCase {
         }
     }
 
+    public void testDisableHighlightIdField() throws Exception {
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", "keyword", "type=keyword"));
+        ensureGreen();
+
+        client().prepareIndex("test", "doc", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1")
+            .setSource("keyword", "Hello World")
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+
+        for (String highlighterType : new String[] {"plain", "unified"}) {
+            SearchResponse searchResponse = client().prepareSearch()
+                .setQuery(matchQuery("_id", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1"))
+                .highlighter(
+                    new HighlightBuilder().field(new Field("*")
+                        .highlighterType(highlighterType).requireFieldMatch(false))
+                )
+                .get();
+            assertHitCount(searchResponse, 1);
+            assertNull(searchResponse.getHits().getAt(0).getHighlightFields().get("_id"));
+        }
+    }
+
     public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin {
 
         public final class MockSnowBall extends TokenFilter {

From 8f73e1e883e0da94359d8d581603d1608fbb38b9 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi <jim.ferenczi@elastic.co>
Date: Thu, 18 Apr 2019 22:48:22 +0200
Subject: [PATCH 088/112] Fix unmapped field handling in the composite
 aggregation (#41280)

The `composite` aggregation maps unknown fields as numerics, which means that
any `after` value set on a query with a field that is unmapped on some indices
will fail if the provided value is not numeric. This commit changes the default
value source to keyword so that any type of `after` value can be parsed.
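
For reference, a hedged Java sketch of the request exercised by the YAML test below,
assuming the 7.x composite builders (CompositeAggregationBuilder, TermsValuesSourceBuilder);
with this change the "kw" source accepts the keyword after-value even on indices where
the field is unmapped.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;

class CompositeAfterSketch {
    static CompositeAggregationBuilder buildRequest() {
        Map<String, Object> after = new HashMap<>();
        after.put("long", 100);
        after.put("kw", "bar");
        // Before this fix, a field unmapped on one of the target indices fell back to a
        // numeric value source and parsing the "bar" after-value failed; it now falls
        // back to a keyword-style source that can parse any after value.
        return new CompositeAggregationBuilder("test", Arrays.asList(
                new TermsValuesSourceBuilder("long").field("long"),
                new TermsValuesSourceBuilder("kw").field("keyword")))
            .aggregateAfter(after);
    }
}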
---
 .../test/search.aggregation/230_composite.yml | 105 +++++++++++++++++-
 .../composite/TermsValuesSourceBuilder.java   |   4 +-
 2 files changed, 107 insertions(+), 2 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
index 8532b40fbc1..bbd8cef689d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
@@ -18,6 +18,22 @@ setup:
                       nested_long:
                         type: long
 
+  - do:
+        indices.create:
+          index: other
+          body:
+            mappings:
+              properties:
+                date:
+                  type: date
+                long:
+                  type: long
+                nested:
+                  type: nested
+                  properties:
+                    nested_long:
+                      type: long
+
   - do:
       index:
         index: test
@@ -54,9 +70,15 @@ setup:
         id:    6
         body:  { "date": "2017-10-21T07:00:00" }
 
+  - do:
+      index:
+        index: other
+        id:    0
+        body:  { "date": "2017-10-20T03:08:45" }
+
   - do:
       indices.refresh:
-        index: [test]
+        index: [test, other]
 
 ---
 "Simple Composite aggregation":
@@ -419,3 +441,84 @@ setup:
   - match: { aggregations.1.2.buckets.0.doc_count:  2 }
   - match: { aggregations.1.2.buckets.1.key.nested: 1000 }
   - match: { aggregations.1.2.buckets.1.doc_count:  1 }
+
+---
+"Composite aggregation with unmapped field":
+  - skip:
+      version: " - 7.0.99"
+      reason:  starting in 7.1 the composite aggregation handles unmapped fields as keywords
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: [test, other]
+        body:
+          aggregations:
+            test:
+              composite:
+                sources: [
+                {
+                  "long": {
+                    "terms": {
+                      "field": "long"
+                    }
+                  }
+                },
+                {
+                  "kw": {
+                    "terms": {
+                      "field": "keyword"
+                    }
+                  }
+                }
+                ]
+
+  - match: {hits.total: 7}
+  - length: { aggregations.test.buckets: 5 }
+  - match: { aggregations.test.buckets.0.key.long: 0}
+  - match: { aggregations.test.buckets.0.key.kw: "bar" }
+  - match: { aggregations.test.buckets.0.doc_count: 2 }
+  - match: { aggregations.test.buckets.1.key.long: 10 }
+  - match: { aggregations.test.buckets.1.key.kw: "foo"}
+  - match: { aggregations.test.buckets.1.doc_count: 1 }
+  - match: { aggregations.test.buckets.2.key.long: 20 }
+  - match: { aggregations.test.buckets.2.key.kw: "foo" }
+  - match: { aggregations.test.buckets.2.doc_count: 1 }
+  - match: { aggregations.test.buckets.3.key.long: 100}
+  - match: { aggregations.test.buckets.3.key.kw: "bar" }
+  - match: { aggregations.test.buckets.3.doc_count: 1 }
+  - match: { aggregations.test.buckets.4.key.long: 1000 }
+  - match: { aggregations.test.buckets.4.key.kw: "bar" }
+  - match: { aggregations.test.buckets.4.doc_count: 1 }
+
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: [test, other]
+        body:
+          aggregations:
+            test:
+              composite:
+                after: { "long": 100, "kw": "bar" }
+                sources: [
+                {
+                  "long": {
+                    "terms": {
+                      "field": "long"
+                    }
+                  }
+                },
+                {
+                  "kw": {
+                    "terms": {
+                      "field": "keyword"
+                    }
+                  }
+                }
+                ]
+
+  - match: {hits.total: 7}
+  - length: { aggregations.test.buckets: 1 }
+  - match: { aggregations.test.buckets.0.key.long: 1000 }
+  - match: { aggregations.test.buckets.0.key.kw: "bar" }
+  - match: { aggregations.test.buckets.0.doc_count: 1 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java
index 75bdd02855e..3ef871f59b9 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java
@@ -83,7 +83,9 @@ public class TermsValuesSourceBuilder extends CompositeValuesSourceBuilder<Terms
     protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig<?> config) throws IOException {
         ValuesSource vs = config.toValuesSource(context.getQueryShardContext());
         if (vs == null) {
-            vs = ValuesSource.Numeric.EMPTY;
+            // The field is unmapped so we use a value source that can parse any type of values.
+            // This is needed because the after values are parsed even when there are no values to process.
+            vs = ValuesSource.Bytes.WithOrdinals.EMPTY;
         }
         final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
         final DocValueFormat format;

From c03394c236f36f6a3d441dfad3a6cbd9280dedb2 Mon Sep 17 00:00:00 2001
From: Lisa Cawley <lcawley@elastic.co>
Date: Thu, 18 Apr 2019 17:30:08 -0700
Subject: [PATCH 089/112] [DOCS] Fixes deprecation notice in pagerduty action
 (#41362)

---
 x-pack/docs/en/watcher/actions/pagerduty.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc
index 87a7b06b2cf..1cd9132a57b 100644
--- a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc
+++ b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc
@@ -155,7 +155,7 @@ must specify an account name and integration key, (see {ref}/secure-settings.htm
 bin/elasticsearch-keystore add xpack.notification.pagerduty.account.my_pagerduty_account.secure_service_api_key
 --------------------------------------------------
 
-deprecated[Storing the service api key in the YAML file or via cluster update settings is still supported, but the keystore setting should be used]
+deprecated[7.0.0, "Storing the service api key in the YAML file or via cluster update settings is still supported, but the keystore setting should be used."]
 
 You can also specify defaults for the <<pagerduty-event-trigger-incident-attributes, 
 PagerDuty event attributes>>:

From 1f91759bc7d0749f4872bbe362906b4714c4b936 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Fri, 19 Apr 2019 08:55:10 +0300
Subject: [PATCH 090/112] Improve the configuration time of the build (#41251)

This will help with reproduction lines, running tests from IDEs, and
other operations that are quick and executed often enough for the
configuration time to matter.

Running Gradle with a FIPS JVM is not supported, so if the runtime JVM
is the same as the Gradle JVM there is no need to spend time checking
for FIPS support.

Verification of the JAVA<version>_HOME env vars is now async and
parallel so it doesn't block configuration.
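
A hedged Java sketch of the deferred, parallel verification pattern (the real change is
Groovy in BuildPlugin.groovy, and it joins the futures in taskGraph.whenReady rather than
inline); all names here are illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

class JavaHomeVerificationSketch {
    static List<Future<?>> submitChecks(ExecutorService exec, Map<Integer, String> javaHomes) {
        List<Future<?>> results = new ArrayList<>();
        javaHomes.forEach((version, home) -> {
            if (home != null) {
                // each JAVA<version>_HOME check spawns a JVM, so run them on worker threads
                results.add(exec.submit(() -> checkSpecificationVersion(version, home)));
            }
        });
        return results; // joined later, e.g. once the task graph is ready
    }

    private static void checkSpecificationVersion(int version, String home) {
        // placeholder: run `java -version` under `home` and compare against `version`
    }
}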
---
 .../elasticsearch/gradle/BuildPlugin.groovy   | 61 +++++++++++++------
 1 file changed, 41 insertions(+), 20 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index b585180bc29..98b4e9537e2 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -61,6 +61,9 @@ import org.gradle.util.GradleVersion
 import java.nio.charset.StandardCharsets
 import java.time.ZoneOffset
 import java.time.ZonedDateTime
+import java.util.concurrent.ExecutorService
+import java.util.concurrent.Executors
+import java.util.concurrent.Future
 import java.util.regex.Matcher
 
 /**
@@ -153,8 +156,12 @@ class BuildPlugin implements Plugin<Project> {
                 runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, runtimeJavaHome))
             }
 
-            String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));'
-            boolean inFipsJvm = Boolean.parseBoolean(runJavaAsScript(project, runtimeJavaHome, inFipsJvmScript))
+            boolean inFipsJvm = false
+            if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) {
+                // We don't expect Gradle to be running in a FIPS JVM
+                String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));'
+                inFipsJvm = Boolean.parseBoolean(runJavaAsScript(project, runtimeJavaHome, inFipsJvmScript))
+            }
 
             // Build debugging info
             println '======================================='
@@ -190,24 +197,38 @@ class BuildPlugin implements Plugin<Project> {
                 throw new GradleException(message)
             }
 
-            for (final Map.Entry<Integer, String> javaVersionEntry : javaVersions.entrySet()) {
-                final String javaHome = javaVersionEntry.getValue()
-                if (javaHome == null) {
-                    continue
-                }
-                JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
-                final JavaVersion expectedJavaVersionEnum
-                final int version = javaVersionEntry.getKey()
-                if (version < 9) {
-                    expectedJavaVersionEnum = JavaVersion.toVersion("1." + version)
-                } else {
-                    expectedJavaVersionEnum = JavaVersion.toVersion(Integer.toString(version))
-                }
-                if (javaVersionEnum != expectedJavaVersionEnum) {
-                    final String message =
-                            "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
-                                    " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
-                    throw new GradleException(message)
+            ExecutorService exec = Executors.newFixedThreadPool(javaVersions.size())
+            Set<Future<Void>> results = new HashSet<>()
+
+            javaVersions.entrySet().stream()
+                    .filter { it.getValue() != null }
+                    .forEach { javaVersionEntry ->
+                        results.add(exec.submit {
+                            final String javaHome = javaVersionEntry.getValue()
+                            final int version = javaVersionEntry.getKey()
+                            if (project.file(javaHome).exists() == false) {
+                                throw new GradleException("Invalid JAVA${version}_HOME=${javaHome} location does not exist")
+                            }
+
+                            JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
+                            final JavaVersion expectedJavaVersionEnum = version < 9 ?
+                                    JavaVersion.toVersion("1." + version) :
+                                    JavaVersion.toVersion(Integer.toString(version))
+
+                            if (javaVersionEnum != expectedJavaVersionEnum) {
+                                final String message =
+                                        "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
+                                                " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
+                                throw new GradleException(message)
+                            }
+                        })
+            }
+
+            project.gradle.taskGraph.whenReady {
+                try {
+                    results.forEach { it.get() }
+                } finally {
+                    exec.shutdown();
                 }
             }
 

From 2f8bbce85da34d34718f674c5e425d186d271af4 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Fri, 19 Apr 2019 09:19:28 +0300
Subject: [PATCH 091/112] Clean up build tool dependencies (#41336)

We are no longer using these dependencies.

Relates to #41061 since the class that seems to be leaking is part of
both Gradle and the logging jar.
---
 buildSrc/build.gradle | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 2175c2700d8..3ff07564b5b 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -113,8 +113,6 @@ repositories {
 
 dependencies {
   compile localGroovy()
-  compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}"
-  compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
   
   compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3'
   compile 'com.netflix.nebula:nebula-publishing-plugin:4.4.4'
@@ -127,16 +125,7 @@ dependencies {
   compile 'de.thetaphi:forbiddenapis:2.6'
   compile 'com.avast.gradle:gradle-docker-compose-plugin:0.8.12'
   testCompile "junit:junit:${props.getProperty('junit')}"
-}
-
-
-// Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs
-// Use logging dependency instead
-// Gradle 4.3.1 stopped releasing the logging jars to jcenter, just use the last available one
-GradleVersion logVersion = GradleVersion.current() > GradleVersion.version('4.3') ? GradleVersion.version('4.3') : GradleVersion.current()
-
-dependencies {
-  compileOnly "org.gradle:gradle-logging:${logVersion.getVersion()}"
+  testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
 }
 
 /*****************************************************************************

From 4ef4ed66b917634eaf3830455cc0ae5e75860db6 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Fri, 19 Apr 2019 09:34:23 +0300
Subject: [PATCH 092/112] Convert repository-hdfs to testclusters (#41252)

* Convert repository-hdfs to testclusters

Relates #40862
---
 plugins/build.gradle                 |   6 +-
 plugins/repository-hdfs/build.gradle | 126 +++++++++++----------------
 2 files changed, 50 insertions(+), 82 deletions(-)

diff --git a/plugins/build.gradle b/plugins/build.gradle
index 585f26c3780..89a4fe7384c 100644
--- a/plugins/build.gradle
+++ b/plugins/build.gradle
@@ -20,11 +20,7 @@
 // only configure immediate children of plugins dir
 configure(subprojects.findAll { it.parent.path == project.path }) {
   group = 'org.elasticsearch.plugin'
-  // TODO exclude some plugins as they require features not yet supproted by testclusters
-  if (false ==  name in ['repository-hdfs']) {
-      apply plugin: 'elasticsearch.testclusters'       
-  }
-
+  apply plugin: 'elasticsearch.testclusters'       
   apply plugin: 'elasticsearch.esplugin'
 
   esplugin {
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 24b03621eba..ad24de0e093 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -18,7 +18,6 @@
  */
 
 import org.apache.tools.ant.taskdefs.condition.Os
-import org.elasticsearch.gradle.test.ClusterConfiguration
 import org.elasticsearch.gradle.test.RestIntegTestTask
 
 import java.nio.file.Files
@@ -64,15 +63,17 @@ dependencies {
   compile "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
 
   hdfsFixture project(':test:fixtures:hdfs-fixture')
+  // Set the keytab files in the classpath so that we can access them from test code without the security manager
+  // freaking out.
+  testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab'])
 }
 
 dependencyLicenses {
   mapping from: /hadoop-.*/, to: 'hadoop'
 }
 
-
 String realm = "BUILD.ELASTIC.CO"
-
+String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")
 
 // Create HDFS File System Testing Fixtures for HA/Secure combinations
 for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) {
@@ -96,7 +97,6 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
         miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED')
       }
     }
-
     // If it's an HA fixture, set a nameservice to use in the JVM options
     if (fixtureName.equals('haHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) {
       miniHDFSArgs.add("-Dha-nameservice=ha-hdfs")
@@ -110,8 +110,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
     if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) {
       miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}")
       miniHDFSArgs.add(
-              project(':test:fixtures:krb5kdc-fixture')
-                      .ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
+              project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
       )
     }
 
@@ -119,67 +118,36 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   }
 }
 
-// The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks:
-project.afterEvaluate {
-  for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
-    ClusterConfiguration cluster = project.extensions.getByName("${integTestTaskName}Cluster") as ClusterConfiguration
-    cluster.dependsOn(project.bundlePlugin)
-
-    Task restIntegTestTask = project.tasks.getByName(integTestTaskName)
-    restIntegTestTask.clusterConfig.plugin(project.path)
-
-    // Default jvm arguments for all test clusters
-    String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
-            " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
-            " " + System.getProperty('tests.jvm.argline', '')
-
-    // If it's a secure cluster, add the keytab as an extra config, and set the krb5 conf in the JVM options.
-    if (integTestTaskName.equals('integTestSecure') || integTestTaskName.equals('integTestSecureHa')) {
-      String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")
-      restIntegTestTask.clusterConfig.extraConfigFile(
-              "repository-hdfs/krb5.keytab",
-              "${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"
-      )
-      jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}"
-      if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
-        jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED'
-      }
-
-      // If it's the HA + Secure tests then also set the Kerberos settings for the integration test JVM since we'll
-      // need to auth to HDFS to trigger namenode failovers.
-      if (integTestTaskName.equals('integTestSecureHa')) {
-        Task restIntegTestTaskRunner = project.tasks.getByName("${integTestTaskName}Runner")
-        restIntegTestTaskRunner.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
-        restIntegTestTaskRunner.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
-        restIntegTestTaskRunner.jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
-        if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
-          restIntegTestTaskRunner.jvmArgs '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED'
-        }
-        restIntegTestTaskRunner.systemProperty (
+for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
+  task "${integTestTaskName}"(type: RestIntegTestTask) {
+    description = "Runs rest tests against an elasticsearch cluster with HDFS."
+    dependsOn(project.bundlePlugin)
+    runner {
+      if (integTestTaskName.contains("Secure")) {
+        dependsOn secureHdfsFixture
+        systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
+        systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
+        jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
+        systemProperty (
                 "test.krb5.keytab.hdfs",
                 project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab")
         )
       }
     }
+  }
 
-    restIntegTestTask.clusterConfig.jvmArgs = jvmArgs
+  testClusters."${integTestTaskName}" {
+    plugin(file(bundlePlugin.archiveFile))
+    if (integTestTaskName.contains("Secure")) {
+      systemProperty "java.security.krb5.conf", krb5conf
+      extraConfigFile(
+              "repository-hdfs/krb5.keytab",
+              file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}")
+      )
+    }
   }
 }
 
-// Create a Integration Test suite just for HA based tests
-RestIntegTestTask integTestHa = project.tasks.create('integTestHa', RestIntegTestTask.class) {
-  description = "Runs rest tests against an elasticsearch cluster with HDFS configured with HA Namenode."
-}
-
-// Create a Integration Test suite just for security based tests
-RestIntegTestTask integTestSecure = project.tasks.create('integTestSecure', RestIntegTestTask.class) {
-  description = "Runs rest tests against an elasticsearch cluster with HDFS secured by MIT Kerberos."
-}
-
-// Create a Integration Test suite just for HA related security based tests
-RestIntegTestTask integTestSecureHa = project.tasks.create('integTestSecureHa', RestIntegTestTask.class) {
-  description = "Runs rest tests against an elasticsearch cluster with HDFS configured with HA Namenode and secured by MIT Kerberos."
-}
 
 // Determine HDFS Fixture compatibility for the current build environment.
 boolean fixtureSupported = false
@@ -208,21 +176,27 @@ if (legalPath == false) {
 
 // Always ignore HA integration tests in the normal integration test runner, they are included below as
 // part of their own HA-specific integration test tasks.
-integTestRunner.exclude('**/Ha*TestSuiteIT.class')
+integTest.runner {
+  exclude('**/Ha*TestSuiteIT.class')
+}
 
 if (fixtureSupported) {
   // Check depends on the HA test. Already depends on the standard test.
   project.check.dependsOn(integTestHa)
 
   // Both standard and HA tests depend on their respective HDFS fixtures
-  integTestCluster.dependsOn hdfsFixture
-  integTestHaCluster.dependsOn haHdfsFixture
+  integTest.dependsOn hdfsFixture
+  integTestHa.dependsOn haHdfsFixture
 
   // The normal test runner only runs the standard hdfs rest tests
-  integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository'
+  integTest.runner {
+    systemProperty 'tests.rest.suite', 'hdfs_repository'
+  }
 
   // Only include the HA integration tests for the HA test task
-  integTestHaRunner.setIncludes(['**/Ha*TestSuiteIT.class'])
+  integTestHa.runner {
+    setIncludes(['**/Ha*TestSuiteIT.class'])
+  }
 } else {
   if (legalPath) {
     logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
@@ -231,29 +205,27 @@ if (fixtureSupported) {
   }
 
   // The normal integration test runner will just test that the plugin loads
-  integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
+  integTest.runner {
+    systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
+  }
   // HA fixture is unsupported. Don't run them.
   integTestHa.setEnabled(false)
 }
 
 check.dependsOn(integTestSecure, integTestSecureHa)
 
-// Fixture dependencies
-integTestSecureCluster.dependsOn secureHdfsFixture
-integTestSecureHaCluster.dependsOn secureHaHdfsFixture
-
-// Set the keytab files in the classpath so that we can access them from test code without the security manager
-// freaking out.
-project.dependencies {
-  testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab'])
-}
-
 // Run just the secure hdfs rest test suite.
-integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
+integTestSecure.runner {
+  systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
+}
 // Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner.
-integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class')
+integTestSecure.runner {
+  exclude('**/Ha*TestSuiteIT.class')
+}
 // Only include the HA integration tests for the HA test task
-integTestSecureHaRunner.setIncludes(['**/Ha*TestSuiteIT.class'])
+integTestSecureHa.runner {
+  setIncludes(['**/Ha*TestSuiteIT.class'])
+}
 
 thirdPartyAudit {
     ignoreMissingClasses()

From 0bb15d3dac6f2da73df6aca68bf2c389ace26680 Mon Sep 17 00:00:00 2001
From: David Turner <david.turner@elastic.co>
Date: Fri, 19 Apr 2019 07:37:22 +0100
Subject: [PATCH 093/112] Allow ops to be blocked after primary promotion
 (#41360)

Today we assert that there are no operations in flight in this test. However,
operations are sometimes blocked, and since #41271 we distinguish these cases,
which causes the assertion to fail. This commit addresses this by allowing
operations to be blocked sometimes after a primary promotion.

Fixes #41333.
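
A small hedged sketch of the relaxed assertion, using the Hamcrest matcher imported by
the test; the wrapper method is illustrative.

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.isOneOf;

import org.elasticsearch.index.shard.IndexShard;

class PromotionAssertionSketch {
    // Right after a promotion the shard may report either zero active operations or the
    // OPERATIONS_BLOCKED sentinel, so the test accepts both instead of insisting on zero.
    static void assertQuiescedOrBlocked(IndexShard shard) {
        assertThat(shard.getActiveOperationsCount(), isOneOf(0, IndexShard.OPERATIONS_BLOCKED));
    }
}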
---
 .../org/elasticsearch/index/shard/IndexShardTests.java | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 0ac168519c6..07a0f3e3f3e 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -182,6 +182,7 @@ import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.hasToString;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.isIn;
+import static org.hamcrest.Matchers.isOneOf;
 import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
@@ -630,7 +631,7 @@ public class IndexShardTests extends IndexShardTestCase {
         closeShards(indexShard);
     }
 
-    public void testOperationPermitsOnPrimaryShards() throws InterruptedException, ExecutionException, IOException {
+    public void testOperationPermitsOnPrimaryShards() throws Exception {
         final ShardId shardId = new ShardId("test", "_na_", 0);
         final IndexShard indexShard;
 
@@ -638,6 +639,7 @@ public class IndexShardTests extends IndexShardTestCase {
             // relocation target
             indexShard = newShard(newShardRouting(shardId, "local_node", "other node",
                 true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing())));
+            assertEquals(0, indexShard.getActiveOperationsCount());
         } else if (randomBoolean()) {
             // simulate promotion
             indexShard = newStartedShard(false);
@@ -654,11 +656,15 @@ public class IndexShardTests extends IndexShardTestCase {
                 new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(),
                 Collections.emptySet());
             latch.await();
+            assertThat(indexShard.getActiveOperationsCount(), isOneOf(0, IndexShard.OPERATIONS_BLOCKED));
+            if (randomBoolean()) {
+                assertBusy(() -> assertEquals(0, indexShard.getActiveOperationsCount()));
+            }
         } else {
             indexShard = newStartedShard(true);
+            assertEquals(0, indexShard.getActiveOperationsCount());
         }
         final long primaryTerm = indexShard.getPendingPrimaryTerm();
-        assertEquals(0, indexShard.getActiveOperationsCount());
         Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard);
         assertEquals(1, indexShard.getActiveOperationsCount());
         Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard);

From e1e2568fa302f8253a0b1c08183640559b1027e7 Mon Sep 17 00:00:00 2001
From: Alpar Torok <torokalpar@gmail.com>
Date: Fri, 19 Apr 2019 09:46:21 +0300
Subject: [PATCH 094/112] Add FIPS specific testclusters configuration (#41199)

ClusterFormationTasks auto-configured these properties for clusters.
This PR adds FIPS-specific configuration across all test clusters from
the main build script to prevent coupling between testclusters and the
build plugin.

Closes #40904
---
 build.gradle                                      | 15 +++++++++++++++
 .../org/elasticsearch/gradle/BuildPlugin.groovy   |  6 ------
 modules/reindex/build.gradle                      |  6 ------
 3 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/build.gradle b/build.gradle
index 2e3fc3178bf..9ca5daec6cc 100644
--- a/build.gradle
+++ b/build.gradle
@@ -598,6 +598,21 @@ allprojects {
   } 
 }
 
+subprojects {
+    // Common config when running with a FIPS-140 runtime JVM
+    if (project.ext.has("inFipsJvm") && project.ext.inFipsJvm) {
+        tasks.withType(Test) {
+          systemProperty 'javax.net.ssl.trustStorePassword', 'password'
+          systemProperty 'javax.net.ssl.keyStorePassword', 'password'
+        }
+        project.pluginManager.withPlugin("elasticsearch.testclusters") {
+          project.testClusters.all {
+            systemProperty 'javax.net.ssl.trustStorePassword', 'password'
+            systemProperty 'javax.net.ssl.keyStorePassword', 'password'
+          }
+        }
+    }
+}
 
 
 
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 98b4e9537e2..72f220c49cf 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -1004,12 +1004,6 @@ class BuildPlugin implements Plugin<Project> {
                 // TODO: remove this once ctx isn't added to update script params in 7.0
                 systemProperty 'es.scripting.update.ctx_in_params', 'false'
 
-                // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
-                if (project.inFipsJvm) {
-                    systemProperty 'javax.net.ssl.trustStorePassword', 'password'
-                    systemProperty 'javax.net.ssl.keyStorePassword', 'password'
-                }
-
                 testLogging {
                     showExceptions = true
                     showCauses = true
diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle
index 48888d1bfce..da184deedaa 100644
--- a/modules/reindex/build.gradle
+++ b/modules/reindex/build.gradle
@@ -95,12 +95,6 @@ dependencies {
   es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
 }
 
-// Issue tracked in https://github.com/elastic/elasticsearch/issues/40904
-if (project.inFipsJvm) {
-  testingConventions.enabled = false
-  integTest.enabled = false
-}
-
 if (Os.isFamily(Os.FAMILY_WINDOWS)) {
   logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows")
   integTest.runner {

From d989079df5d90c7132531d92d0b793f2b16be9a8 Mon Sep 17 00:00:00 2001
From: Arlind <xhakoliarlind@gmail.com>
Date: Fri, 19 Apr 2019 10:24:29 +0200
Subject: [PATCH 095/112] Update glossary.asciidoc (#41364)

---
 docs/reference/glossary.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc
index 7221a38cce7..e59e57f3c5a 100644
--- a/docs/reference/glossary.asciidoc
+++ b/docs/reference/glossary.asciidoc
@@ -132,7 +132,7 @@ more primary shards to scale the number of <<glossary-document,documents>>
 that your index can handle.
 +
 You cannot change the number of primary shards in an index, once the index is
-index is created. However, an index can be split into a new index using the
+created. However, an index can be split into a new index using the
 <<indices-split-index, split API>>.
 +
 See also <<glossary-routing,routing>>

From 7a34ba35f7e362a541f52d8dc884118c193db7c9 Mon Sep 17 00:00:00 2001
From: Marios Trivyzas <matriv@gmail.com>
Date: Fri, 19 Apr 2019 19:03:28 +0300
Subject: [PATCH 096/112] SQL: Fix bug with optimization of null related
 conditionals (#41355)

The SimplifyConditional rule removes NULL literals from those
functions to simplify their evaluation. This happens in the Optimizer,
where a new instance of the conditional function is generated. Previously,
the dataType was not set properly (it defaulted to DataType.NULL) for
those new instances, and since resolveType() wasn't called again
the function always returned null.

E.g.:

SELECT COALESCE(null, 'foo', null, 'bar')

COALESCE(null, 'foo', null, 'bar')
-----------------
null

This issue was not visible before because the tests always used an alias
for the conditional function, which caused resolveType() to be
called and the dataType to be set properly.

E.g.:

SELECT COALESCE(null, 'foo', null, 'bar') as c

c
-----------------
foo

(cherry picked from commit c39980a65dd593363f1d8d1b038b26cb0ce02aaf)
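
A hedged Java sketch of the lazy-typing pattern the fix introduces (import paths are
assumed from the x-pack SQL package layout; the abstract class stands in for
ConditionalFunction):

import java.util.List;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypeConversion;

abstract class LazyTypedConditionalSketch {
    private DataType dataType = null;

    // Compute the common type of the children on first access and cache it, so copies
    // created by the Optimizer (which never go through resolveType()) still report the
    // correct type instead of NULL.
    DataType dataType() {
        if (dataType == null) {
            dataType = DataType.NULL;
            for (Expression child : children()) {
                dataType = DataTypeConversion.commonType(dataType, child.dataType());
            }
        }
        return dataType;
    }

    abstract List<Expression> children();
}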
---
 x-pack/plugin/sql/qa/src/main/resources/null.csv-spec    | 7 +++++++
 .../predicate/conditional/ConditionalFunction.java       | 9 +++++++--
 .../xpack/sql/optimizer/OptimizerTests.java              | 6 ++++++
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec
index 19541cf5d9f..610217b2333 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec
+++ b/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec
@@ -61,6 +61,13 @@ c:i
 ;
 
 coalesceMixed
+SELECT COALESCE(null, 123, null, 321);
+
+COALESCE(null, 123, null, 321):i
+123
+;
+
+coalesceMixedWithAlias
 SELECT COALESCE(null, 123, null, 321) AS c;
 
 c:i
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java
index 3de85185e8a..b3841f09e82 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java
@@ -25,7 +25,7 @@ import static org.elasticsearch.xpack.sql.util.StringUtils.ordinal;
  */
 public abstract class ConditionalFunction extends ScalarFunction {
 
-    protected DataType dataType = DataType.NULL;
+    protected DataType dataType = null;
 
     ConditionalFunction(Source source, List<Expression> fields) {
         super(source, fields);
@@ -33,6 +33,12 @@ public abstract class ConditionalFunction extends ScalarFunction {
 
     @Override
     public DataType dataType() {
+        if (dataType == null) {
+            dataType = DataType.NULL;
+            for (Expression exp : children()) {
+                dataType = DataTypeConversion.commonType(dataType, exp.dataType());
+            }
+        }
         return dataType;
     }
 
@@ -61,7 +67,6 @@ public abstract class ConditionalFunction extends ScalarFunction {
                         child.dataType().typeName));
                 }
             }
-            dataType = DataTypeConversion.commonType(dataType, child.dataType());
         }
         return TypeResolution.TYPE_RESOLVED;
     }
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
index c95206c29e9..eb8ac2b4d15 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
@@ -501,6 +501,7 @@ public class OptimizerTests extends ESTestCase {
                         randomListOfNulls())));
         assertEquals(1, e.children().size());
         assertEquals(TRUE, e.children().get(0));
+        assertEquals(DataType.BOOLEAN, e.dataType());
     }
 
     private List<Expression> randomListOfNulls() {
@@ -514,6 +515,7 @@ public class OptimizerTests extends ESTestCase {
         assertEquals(Coalesce.class, e.getClass());
         assertEquals(1, e.children().size());
         assertEquals(TRUE, e.children().get(0));
+        assertEquals(DataType.BOOLEAN, e.dataType());
     }
 
     public void testSimplifyIfNullNulls() {
@@ -527,11 +529,13 @@ public class OptimizerTests extends ESTestCase {
         assertEquals(IfNull.class, e.getClass());
         assertEquals(1, e.children().size());
         assertEquals(ONE, e.children().get(0));
+        assertEquals(DataType.INTEGER, e.dataType());
 
         e = new SimplifyConditional().rule(new IfNull(EMPTY, ONE, NULL));
         assertEquals(IfNull.class, e.getClass());
         assertEquals(1, e.children().size());
         assertEquals(ONE, e.children().get(0));
+        assertEquals(DataType.INTEGER, e.dataType());
     }
 
     public void testFoldNullNotAppliedOnNullIf() {
@@ -559,6 +563,7 @@ public class OptimizerTests extends ESTestCase {
         assertEquals(2, e.children().size());
         assertEquals(ONE, e.children().get(0));
         assertEquals(TWO, e.children().get(1));
+        assertEquals(DataType.INTEGER, e.dataType());
     }
 
     public void testSimplifyLeastNulls() {
@@ -580,6 +585,7 @@ public class OptimizerTests extends ESTestCase {
         assertEquals(2, e.children().size());
         assertEquals(ONE, e.children().get(0));
         assertEquals(TWO, e.children().get(1));
+        assertEquals(DataType.INTEGER, e.dataType());
     }
     
     public void testConcatFoldingIsNotNull() {

From bcd0939b611a26b3cddee5c849fba203e72ddc0d Mon Sep 17 00:00:00 2001
From: Ryan Ernst <ryan@iernst.net>
Date: Fri, 19 Apr 2019 09:49:36 -0700
Subject: [PATCH 097/112] Add a rule for task dependencies (#41322)

This commit adds a task rule to print the task dependencies of any task.
It only prints the direct dependencies, but makes debugging missing
dependencies a lot easier.
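
For example (hypothetical project and task names), running
`gradlew :server:testDependencies` creates an ad-hoc `testDependencies` task
that prints `:server:test dependencies:` followed by one `  - <task path>`
line per direct dependency of the real `test` task.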
---
 build.gradle | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/build.gradle b/build.gradle
index 9ca5daec6cc..3cd35d2a5d4 100644
--- a/build.gradle
+++ b/build.gradle
@@ -584,9 +584,30 @@ allprojects {
         configurations.findAll { it.isCanBeResolved() }.each { it.resolve() }
       }
   }
-}
 
-allprojects {
+  // helper task to print direct dependencies of a single task
+  project.tasks.addRule("Pattern: <taskName>Dependencies") { String taskName ->
+    if (taskName.endsWith("Dependencies") == false) {
+      return
+    }
+    if (project.tasks.findByName(taskName) != null) {
+      return
+    }
+    String realTaskName = taskName.substring(0, taskName.length() - "Dependencies".length())
+    Task realTask = project.tasks.findByName(realTaskName)
+    if (realTask == null) {
+      return
+    }
+    project.tasks.create(taskName) {
+      doLast {
+        println("${realTask.path} dependencies:")
+        for (Task dep : realTask.getTaskDependencies().getDependencies(realTask)) {
+          println("  - ${dep.path}")
+        }
+      }
+    }
+  }
+
   task checkPart1
   task checkPart2 
   tasks.matching { it.name == "check" }.all { check ->

From 154d40494f36415f36643784d2de04c7f097e590 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Fri, 19 Apr 2019 14:08:22 -0400
Subject: [PATCH 098/112] Fix build issue if no specific Java versions are set
 (#41379)

If no Java versions are set then when we size the executor thread pool
we end up with zero threads, which is illegal. This commit avoids that
problem by only starting the executor when needed.
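
As a quick, runnable illustration of the underlying constraint (plain JDK
code, not part of the build logic):

import java.util.concurrent.Executors;

// Quick demo of the failure mode: a fixed pool of size zero is rejected by
// the JDK with an IllegalArgumentException.
public class ZeroThreadPoolDemo {
    public static void main(String[] args) {
        try {
            Executors.newFixedThreadPool(0); // what sizing by an empty javaVersions map amounted to
        } catch (IllegalArgumentException e) {
            System.out.println("newFixedThreadPool(0) -> " + e);
        }
    }
}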
---
 .../elasticsearch/gradle/BuildPlugin.groovy   | 77 ++++++++++---------
 1 file changed, 40 insertions(+), 37 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 72f220c49cf..8219e1a2870 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -130,13 +130,6 @@ class BuildPlugin implements Plugin<Project> {
             String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome)
             File gradleJavaHome = Jvm.current().javaHome
 
-            final Map<Integer, String> javaVersions = [:]
-            for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) {
-                if(System.getenv(getJavaHomeEnvVarName(version.toString())) != null) {
-                    javaVersions.put(version, findJavaHome(version.toString()));
-                }
-            }
-
             String javaVendor = System.getProperty('java.vendor')
             String gradleJavaVersion = System.getProperty('java.version')
             String gradleJavaVersionDetails = "${javaVendor} ${gradleJavaVersion}" +
@@ -197,38 +190,48 @@ class BuildPlugin implements Plugin<Project> {
                 throw new GradleException(message)
             }
 
-            ExecutorService exec = Executors.newFixedThreadPool(javaVersions.size())
-            Set<Future<Void>> results = new HashSet<>()
-
-            javaVersions.entrySet().stream()
-                    .filter { it.getValue() != null }
-                    .forEach { javaVersionEntry ->
-                        results.add(exec.submit {
-                            final String javaHome = javaVersionEntry.getValue()
-                            final int version = javaVersionEntry.getKey()
-                            if (project.file(javaHome).exists() == false) {
-                                throw new GradleException("Invalid JAVA${version}_HOME=${javaHome} location does not exist")
-                            }
-
-                            JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
-                            final JavaVersion expectedJavaVersionEnum = version < 9 ?
-                                    JavaVersion.toVersion("1." + version) :
-                                    JavaVersion.toVersion(Integer.toString(version))
-
-                            if (javaVersionEnum != expectedJavaVersionEnum) {
-                                final String message =
-                                        "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
-                                                " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
-                                throw new GradleException(message)
-                            }
-                        })
+            final Map<Integer, String> javaVersions = [:]
+            for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) {
+                if(System.getenv(getJavaHomeEnvVarName(version.toString())) != null) {
+                    javaVersions.put(version, findJavaHome(version.toString()));
+                }
             }
 
-            project.gradle.taskGraph.whenReady {
-                try {
-                    results.forEach { it.get() }
-                } finally {
-                    exec.shutdown();
+            if (javaVersions.isEmpty() == false) {
+
+                ExecutorService exec = Executors.newFixedThreadPool(javaVersions.size())
+                Set<Future<Void>> results = new HashSet<>()
+
+                javaVersions.entrySet().stream()
+                        .filter { it.getValue() != null }
+                        .forEach { javaVersionEntry ->
+                    results.add(exec.submit {
+                        final String javaHome = javaVersionEntry.getValue()
+                        final int version = javaVersionEntry.getKey()
+                        if (project.file(javaHome).exists() == false) {
+                            throw new GradleException("Invalid JAVA${version}_HOME=${javaHome} location does not exist")
+                        }
+
+                        JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
+                        final JavaVersion expectedJavaVersionEnum = version < 9 ?
+                                JavaVersion.toVersion("1." + version) :
+                                JavaVersion.toVersion(Integer.toString(version))
+
+                        if (javaVersionEnum != expectedJavaVersionEnum) {
+                            final String message =
+                                    "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
+                                            " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
+                            throw new GradleException(message)
+                        }
+                    })
+                }
+
+                project.gradle.taskGraph.whenReady {
+                    try {
+                        results.forEach { it.get() }
+                    } finally {
+                        exec.shutdown();
+                    }
                 }
             }
 

From c0164cbb637570659a4316f0c69b45dfc93f8ac9 Mon Sep 17 00:00:00 2001
From: Ryan Ernst <ryan@iernst.net>
Date: Fri, 19 Apr 2019 11:34:16 -0700
Subject: [PATCH 099/112] Only include relevant platform files from modules
 (#41089)

This commit adds a filter to the files included from modules so that only
platform-specific files relevant to the distribution being built are
included. For example, the deb files on linux now only include the linux
ML binaries, and not the windows or macos files.
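
Concretely, with this change the linux tar copies module files from the
`buildDefaultModules` (or `buildOssModules`) output while excluding
`**/platform/windows-x86_64/**` and `**/platform/darwin-x86_64/**`, so only
the linux ML binaries end up in the archive.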
---
 distribution/archives/build.gradle | 24 ++++++++++++------------
 distribution/build.gradle          | 18 +++++++++++++++---
 distribution/packages/build.gradle |  2 +-
 3 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index b48ebeb2c6a..c7c58ad4ebd 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -113,25 +113,25 @@ task buildIntegTestZip(type: Zip) {
 task buildWindowsZip(type: Zip) {
   configure(commonZipConfig)
   archiveClassifier = 'windows-x86_64'
-  with archiveFiles(modulesFiles(false), 'zip', 'windows', false, true)
+  with archiveFiles(modulesFiles(false, 'windows'), 'zip', 'windows', false, true)
 }
 
 task buildOssWindowsZip(type: Zip) {
   configure(commonZipConfig)
   archiveClassifier = 'windows-x86_64'
-  with archiveFiles(modulesFiles(true), 'zip', 'windows', true, true)
+  with archiveFiles(modulesFiles(true, 'windows'), 'zip', 'windows', true, true)
 }
 
 task buildNoJdkWindowsZip(type: Zip) {
   configure(commonZipConfig)
   archiveClassifier = 'no-jdk-windows-x86_64'
-  with archiveFiles(modulesFiles(false), 'zip', 'windows', false, false)
+  with archiveFiles(modulesFiles(false, 'windows'), 'zip', 'windows', false, false)
 }
 
 task buildOssNoJdkWindowsZip(type: Zip) {
   configure(commonZipConfig)
   archiveClassifier = 'no-jdk-windows-x86_64'
-  with archiveFiles(modulesFiles(true), 'zip', 'windows', true, false)
+  with archiveFiles(modulesFiles(true, 'windows'), 'zip', 'windows', true, false)
 }
 
 Closure commonTarConfig = {
@@ -144,49 +144,49 @@ Closure commonTarConfig = {
 task buildDarwinTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'darwin-x86_64'
-  with archiveFiles(modulesFiles(false), 'tar', 'darwin', false, true)
+  with archiveFiles(modulesFiles(false, 'darwin'), 'tar', 'darwin', false, true)
 }
 
 task buildOssDarwinTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'darwin-x86_64'
-  with archiveFiles(modulesFiles(true), 'tar', 'darwin', true, true)
+  with archiveFiles(modulesFiles(true, 'darwin'), 'tar', 'darwin', true, true)
 }
 
 task buildNoJdkDarwinTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'no-jdk-darwin-x86_64'
-  with archiveFiles(modulesFiles(false), 'tar', 'darwin', false, false)
+  with archiveFiles(modulesFiles(false, 'darwin'), 'tar', 'darwin', false, false)
 }
 
 task buildOssNoJdkDarwinTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'no-jdk-darwin-x86_64'
-  with archiveFiles(modulesFiles(true), 'tar', 'darwin', true, false)
+  with archiveFiles(modulesFiles(true, 'darwin'), 'tar', 'darwin', true, false)
 }
 
 task buildLinuxTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'linux-x86_64'
-  with archiveFiles(modulesFiles(false), 'tar', 'linux', false, true)
+  with archiveFiles(modulesFiles(false, 'linux'), 'tar', 'linux', false, true)
 }
 
 task buildOssLinuxTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'linux-x86_64'
-  with archiveFiles(modulesFiles(true), 'tar', 'linux', true, true)
+  with archiveFiles(modulesFiles(true, 'linux'), 'tar', 'linux', true, true)
 }
 
 task buildNoJdkLinuxTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'no-jdk-linux-x86_64'
-  with archiveFiles(modulesFiles(false), 'tar', 'linux', false, false)
+  with archiveFiles(modulesFiles(false, 'linux'), 'tar', 'linux', false, false)
 }
 
 task buildOssNoJdkLinuxTar(type: Tar) {
   configure(commonTarConfig)
   archiveClassifier = 'no-jdk-linux-x86_64'
-  with archiveFiles(modulesFiles(true), 'tar', 'linux', true, false)
+  with archiveFiles(modulesFiles(true, 'linux'), 'tar', 'linux', true, false)
 }
 
 Closure tarExists = { it -> new File('/bin/tar').exists() || new File('/usr/bin/tar').exists() || new File('/usr/local/bin/tar').exists() }
diff --git a/distribution/build.gradle b/distribution/build.gradle
index e27d37bb513..8132729b5a1 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -305,7 +305,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
       }
     }
 
-    modulesFiles = { oss ->
+    modulesFiles = { oss, platform ->
       copySpec {
         eachFile {
           if (it.relativePath.segments[-2] == 'bin') {
@@ -315,10 +315,22 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
             it.mode = 0644
           }
         }
+        Task buildModules
         if (oss) {
-          from project(':distribution').buildOssModules
+          buildModules = project(':distribution').buildOssModules
         } else {
-          from project(':distribution').buildDefaultModules
+          buildModules = project(':distribution').buildDefaultModules
+        }
+        List excludePlatforms = ['linux', 'windows', 'darwin']
+        if (platform != null) {
+           excludePlatforms.remove(excludePlatforms.indexOf(platform))
+        } else {
+           excludePlatforms = []
+        }
+        from(buildModules) {
+          for (String excludePlatform : excludePlatforms) {
+            exclude "**/platform/${excludePlatform}-x86_64/**"
+          }
         }
       }
     }
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 1c7fc466ba8..11c56bc66e0 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -139,7 +139,7 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk) {
         with libFiles(oss)
       }
       into('modules') {
-        with modulesFiles(oss)
+        with modulesFiles(oss, 'linux')
       }
       if (jdk) {
         into('jdk') {

From 38b43c4a56fa79f0972a50f0862c1761830ec095 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Fri, 19 Apr 2019 17:14:49 -0400
Subject: [PATCH 100/112] Limit the number of forks getting Java versions
 (#41386)

To reduce configuration time, we fork some threads to compute the Java
version for the various configured Javas. However, as the number of
JAVA${N}_HOME variables increases, the current implementation creates as
many threads as there are such variables, which could be more than the
number of physical cores on the machine. It is unlikely that we would see
benefits from trying to execute all of these at once beyond the number of
physical cores (maybe simultaneous multi-threading helps, though, who
knows). Therefore, this commit limits the parallelization here to the
number of physical cores.
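
A small, self-contained sketch of the sizing idea (plain JDK code, not the
Gradle plugin itself; Runtime.availableProcessors() stands in here for the
plugin's physical-core detection):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch only: cap the verification pool at the core count, not at the
// number of configured JAVA<N>_HOME entries.
public class BoundedPoolDemo {
    public static void main(String[] args) {
        int configuredJavaHomes = 12; // hypothetical count of JAVA<N>_HOME variables
        int cores = Runtime.getRuntime().availableProcessors();
        int poolSize = Math.min(configuredJavaHomes, Math.max(1, cores));

        ExecutorService exec = Executors.newFixedThreadPool(poolSize);
        try {
            for (int i = 0; i < configuredJavaHomes; i++) {
                final int version = 8 + i;
                exec.submit(() -> System.out.println("verifying JAVA" + version + "_HOME"));
            }
        } finally {
            exec.shutdown();
        }
    }
}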
---
 .../org/elasticsearch/gradle/BuildPlugin.groovy    | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 8219e1a2870..f1c6721aa0d 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -197,9 +197,10 @@ class BuildPlugin implements Plugin<Project> {
                 }
             }
 
+            final int numberOfPhysicalCores = numberOfPhysicalCores(project.rootProject)
             if (javaVersions.isEmpty() == false) {
 
-                ExecutorService exec = Executors.newFixedThreadPool(javaVersions.size())
+                ExecutorService exec = Executors.newFixedThreadPool(numberOfPhysicalCores)
                 Set<Future<Void>> results = new HashSet<>()
 
                 javaVersions.entrySet().stream()
@@ -247,7 +248,7 @@ class BuildPlugin implements Plugin<Project> {
             project.rootProject.ext.inFipsJvm = inFipsJvm
             project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion)
             project.rootProject.ext.java9Home = "${-> findJavaHome("9")}"
-            project.rootProject.ext.defaultParallel = findDefaultParallel(project.rootProject)
+            project.rootProject.ext.defaultParallel = numberOfPhysicalCores
         }
 
         project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion
@@ -1024,7 +1025,7 @@ class BuildPlugin implements Plugin<Project> {
         }
     }
 
-    private static int findDefaultParallel(Project project) {
+    private static int numberOfPhysicalCores(Project project) {
         if (project.file("/proc/cpuinfo").exists()) {
             // Count physical cores on any Linux distro ( don't count hyper-threading )
             Map<String, Integer> socketToCore = [:]
@@ -1037,7 +1038,7 @@ class BuildPlugin implements Plugin<Project> {
                     if (name == "physical id") {
                         currentID = value
                     }
-                    // Number  of cores not including hyper-threading
+                    // number of cores not including hyper-threading
                     if (name == "cpu cores") {
                         assert currentID.isEmpty() == false
                         socketToCore[currentID] = Integer.valueOf(value)
@@ -1055,8 +1056,11 @@ class BuildPlugin implements Plugin<Project> {
                 standardOutput = stdout
             }
             return Integer.parseInt(stdout.toString('UTF-8').trim())
+        } else {
+            // guess that it is half the number of processors (which is wrong on systems that do not have simultaneous multi-threading)
+            // TODO: implement this on Windows
+            return Runtime.getRuntime().availableProcessors() / 2
         }
-        return Runtime.getRuntime().availableProcessors() / 2
     }
 
     private static configurePrecommit(Project project) {

From c80f86e3e4d94dc4220a7b268514282bc5acbef2 Mon Sep 17 00:00:00 2001
From: clement-tourriere <clement.tourriere@opendatasoft.com>
Date: Fri, 19 Apr 2019 22:17:00 +0200
Subject: [PATCH 101/112] Add ignore_above in ICUCollationKeywordFieldMapper
 (#40414)

Add the possibility to use the ignore_above parameter in ICUCollationKeywordFieldMapper.

Close #40413
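
A minimal, hypothetical sketch of what the new parameter does at index time
(not the mapper's actual parsing code): values longer than ignore_above are
simply skipped.

import java.util.ArrayList;
import java.util.List;

public class IgnoreAboveDemo {
    static List<String> index(List<String> values, int ignoreAbove) {
        List<String> indexed = new ArrayList<>();
        for (String value : values) {
            if (value == null || value.length() > ignoreAbove) {
                continue; // too long (or null): nothing is added to the document
            }
            indexed.add(value);
        }
        return indexed;
    }

    public static void main(String[] args) {
        // with ignore_above = 5, "elk" is indexed and "elasticsearch" is not
        System.out.println(index(List.of("elk", "elasticsearch"), 5));
    }
}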
---
 docs/plugins/analysis-icu.asciidoc            |  8 ++++
 .../ICUCollationKeywordFieldMapper.java       | 29 ++++++++++--
 .../ICUCollationKeywordFieldMapperTests.java  | 47 +++++++++++++++++++
 3 files changed, 81 insertions(+), 3 deletions(-)

diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc
index 51be1907c98..9dc88967485 100644
--- a/docs/plugins/analysis-icu.asciidoc
+++ b/docs/plugins/analysis-icu.asciidoc
@@ -413,6 +413,14 @@ The following parameters are accepted by `icu_collation_keyword` fields:
     Accepts a string value which is substituted for any explicit `null`
     values.  Defaults to `null`, which means the field is treated as missing.
 
+<<ignore-above,`ignore_above`>>::
+
+    Strings longer than the `ignore_above` setting will be ignored.
+    Checking is performed on the original string before the collation.
+    The `ignore_above` setting can be updated on existing fields
+    using the {ref}/indices-put-mapping.html[PUT mapping API].
+    By default, there is no limit and all values will be indexed.
+
 `store`::
 
     Whether the field value should be stored and retrievable separately from
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
index a228283527d..4b29d314356 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
@@ -70,6 +70,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         }
 
         public static final String NULL_VALUE = null;
+        public static final int IGNORE_ABOVE = Integer.MAX_VALUE;
     }
 
     public static final class CollationFieldType extends StringFieldType {
@@ -226,6 +227,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         private boolean numeric = false;
         private String variableTop = null;
         private boolean hiraganaQuaternaryMode = false;
+        protected int ignoreAbove = Defaults.IGNORE_ABOVE;
 
         public Builder(String name) {
             super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
@@ -247,6 +249,14 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
             return super.indexOptions(indexOptions);
         }
 
+        public Builder ignoreAbove(int ignoreAbove) {
+            if (ignoreAbove < 0) {
+                throw new IllegalArgumentException("[ignore_above] must be positive, got " + ignoreAbove);
+            }
+            this.ignoreAbove = ignoreAbove;
+            return this;
+        }
+
         public String rules() {
             return rules;
         }
@@ -458,7 +468,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
             setupFieldType(context);
             return new ICUCollationKeywordFieldMapper(name, fieldType, defaultFieldType, context.indexSettings(),
                 multiFieldsBuilder.build(this, context), copyTo, rules, language, country, variant, strength, decomposition,
-                alternate, caseLevel, caseFirst, numeric, variableTop, hiraganaQuaternaryMode, collator);
+                alternate, caseLevel, caseFirst, numeric, variableTop, hiraganaQuaternaryMode, ignoreAbove, collator);
         }
     }
 
@@ -480,6 +490,10 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
                         builder.nullValue(fieldNode.toString());
                         iterator.remove();
                         break;
+                    case "ignore_above":
+                        builder.ignoreAbove(XContentMapValues.nodeIntegerValue(fieldNode, -1));
+                        iterator.remove();
+                        break;
                     case "norms":
                         builder.omitNorms(!XContentMapValues.nodeBooleanValue(fieldNode, "norms"));
                         iterator.remove();
@@ -553,13 +567,15 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
     private final boolean numeric;
     private final String variableTop;
     private final boolean hiraganaQuaternaryMode;
+    private int ignoreAbove;
     private final Collator collator;
 
     protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
                                              Settings indexSettings, MultiFields multiFields, CopyTo copyTo, String rules, String language,
                                              String country, String variant,
                                              String strength, String decomposition, String alternate, boolean caseLevel, String caseFirst,
-                                             boolean numeric, String variableTop, boolean hiraganaQuaternaryMode, Collator collator) {
+                                             boolean numeric, String variableTop, boolean hiraganaQuaternaryMode,
+                                             int ignoreAbove, Collator collator) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
         assert collator.isFrozen();
         this.rules = rules;
@@ -574,6 +590,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         this.numeric = numeric;
         this.variableTop = variableTop;
         this.hiraganaQuaternaryMode = hiraganaQuaternaryMode;
+        this.ignoreAbove = ignoreAbove;
         this.collator = collator;
     }
 
@@ -642,6 +659,8 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
             conflicts.add("Cannot update hiragana_quaternary_mode setting for [" + CONTENT_TYPE + "]");
         }
 
+        this.ignoreAbove = icuMergeWith.ignoreAbove;
+
         if (!conflicts.isEmpty()) {
             throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
         }
@@ -702,6 +721,10 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         if (includeDefaults || hiraganaQuaternaryMode) {
             builder.field("hiragana_quaternary_mode", hiraganaQuaternaryMode);
         }
+
+        if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
+            builder.field("ignore_above", ignoreAbove);
+        }
     }
 
     @Override
@@ -718,7 +741,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
             }
         }
 
-        if (value == null) {
+        if (value == null || value.length() > ignoreAbove) {
             return;
         }
 
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java
index 103098d5a46..058bd7dbc89 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java
@@ -403,4 +403,51 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase {
         assertEquals("Can't merge because of conflicts: [Cannot update language setting for [" + FIELD_TYPE
             + "], Cannot update strength setting for [" + FIELD_TYPE + "]]", e.getMessage());
     }
+
+
+    public void testIgnoreAbove() throws IOException {
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field").field("type", FIELD_TYPE)
+            .field("ignore_above", 5).endObject().endObject()
+            .endObject().endObject());
+
+        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
+
+        assertEquals(mapping, mapper.mappingSource().toString());
+
+        ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
+            .bytes(XContentFactory.jsonBuilder()
+                .startObject()
+                .field("field", "elk")
+                .endObject()),
+            XContentType.JSON));
+
+        IndexableField[] fields = doc.rootDoc().getFields("field");
+        assertEquals(2, fields.length);
+
+        doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
+            .bytes(XContentFactory.jsonBuilder()
+                .startObject()
+                .field("field", "elasticsearch")
+                .endObject()),
+            XContentType.JSON));
+
+        fields = doc.rootDoc().getFields("field");
+        assertEquals(0, fields.length);
+    }
+
+    public void testUpdateIgnoreAbove() throws IOException {
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject()
+            .endObject().endObject());
+
+        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
+
+        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field").field("type", FIELD_TYPE)
+            .field("ignore_above", 5).endObject().endObject()
+            .endObject().endObject());
+        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
+    }
+
 }

From ac7d5e3e9bab9c79a0414e85e05bfec5a9afe19b Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Fri, 19 Apr 2019 17:23:15 -0400
Subject: [PATCH 102/112] Fix reference to ignore_above from analysis-icu docs

This commit fixes a reference to the docs for ignore_above from the
analysis-icu plugin docs.
---
 docs/plugins/analysis-icu.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc
index 9dc88967485..b6299139992 100644
--- a/docs/plugins/analysis-icu.asciidoc
+++ b/docs/plugins/analysis-icu.asciidoc
@@ -413,7 +413,7 @@ The following parameters are accepted by `icu_collation_keyword` fields:
     Accepts a string value which is substituted for any explicit `null`
     values.  Defaults to `null`, which means the field is treated as missing.
 
-<<ignore-above,`ignore_above`>>::
+{ref}/ignore-above.html[`ignore_above`]::
 
     Strings longer than the `ignore_above` setting will be ignored.
     Checking is performed on the original string before the collation.

From 2b20bd0b8d9a6c05b85e6b0ab318a501acca7549 Mon Sep 17 00:00:00 2001
From: Joe Zack <me@joezack.com>
Date: Sat, 20 Apr 2019 04:32:35 -0400
Subject: [PATCH 103/112] Fix discovery config in docker-compose docs (#41394)

Today's `docker-compose` docs are missing the `discovery.seed_hosts` config on
one of the nodes. With today's configuration the cluster can still form the
first time it is started, because `cluster.initial_master_nodes` requires both
nodes to bootstrap the cluster, which ensures that each discovers the other.
However, if `es02` is elected master it will remove `es01` from the voting
configuration and then, when restarted, it will form a cluster on its own
without needing to do any discovery. Meanwhile `es01` doesn't know how to find
`es02` after a restart, so it will be unable to join this cluster.

This commit fixes this by adding the missing configuration.
---
 docs/reference/setup/install/docker.asciidoc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc
index 9d03edb9e7e..76112f220dc 100644
--- a/docs/reference/setup/install/docker.asciidoc
+++ b/docs/reference/setup/install/docker.asciidoc
@@ -169,6 +169,7 @@ services:
     container_name: es01
     environment:
       - node.name=es01
+      - discovery.seed_hosts=es02
       - cluster.initial_master_nodes=es01,es02
       - cluster.name=docker-cluster
       - bootstrap.memory_lock=true

From 21bf2fe3c4a6c6cc92d8c18457e28f68dca051b3 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Sat, 20 Apr 2019 08:21:10 -0400
Subject: [PATCH 104/112] Reduce security permissions in CCR plugin (#41391)

It looks like these permissions were copy/pasted from another plugin yet
almost none of these permissions are needed for the CCR plugin. This
commit removes all these unneeded permissions from the CCR plugin.
---
 .../plugin-metadata/plugin-security.policy    | 46 -------------------
 1 file changed, 46 deletions(-)

diff --git a/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy
index 45d92fd2b8a..16701ab74d8 100644
--- a/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy
+++ b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy
@@ -1,50 +1,4 @@
 grant {
-  // needed because of problems in unbound LDAP library
-  permission java.util.PropertyPermission "*", "read,write";
-
-  // required to configure the custom mailcap for watcher
-  permission java.lang.RuntimePermission "setFactory";
-
-  // needed when sending emails for javax.activation
-  // otherwise a classnotfound exception is thrown due to trying
-  // to load the class with the application class loader
-  permission java.lang.RuntimePermission "setContextClassLoader";
-  permission java.lang.RuntimePermission "getClassLoader";
-  // TODO: remove use of this jar as soon as possible!!!!
-  permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
-
-  // bouncy castle
-  permission java.security.SecurityPermission "putProviderProperty.BC";
-
-  // needed for x-pack security extension
-  permission java.security.SecurityPermission "createPolicy.JavaPolicy";
-  permission java.security.SecurityPermission "getPolicy";
-  permission java.security.SecurityPermission "setPolicy";
-
   // needed for multiple server implementations used in tests
   permission java.net.SocketPermission "*", "accept,connect";
-
-  // needed for Windows named pipes in machine learning
-  permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
 };
-
-grant codeBase "${codebase.netty-common}" {
-   // for reading the system-wide configuration for the backlog of established sockets
-   permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read";
-};
-
-grant codeBase "${codebase.netty-transport}" {
-   // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854
-   // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely!
-   permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write";
-};
-
-grant codeBase "${codebase.elasticsearch-rest-client}" {
-  // rest client uses system properties which gets the default proxy
-  permission java.net.NetPermission "getProxySelector";
-};
-
-grant codeBase "${codebase.httpasyncclient}" {
-  // rest client uses system properties which gets the default proxy
-  permission java.net.NetPermission "getProxySelector";
-};
\ No newline at end of file

From 5c40fc9ba5e470b13d04882e9653aa772fd45039 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Sat, 20 Apr 2019 08:22:56 -0400
Subject: [PATCH 105/112] Remove script engine from X-Pack plugin (#41387)

The X-Pack plugin implements ScriptPlugin yet it does not actually
implement any of the methods on the interface, effectively making this a
no-op. This commit removes this interface from the X-Pack plugin.
---
 .../main/java/org/elasticsearch/xpack/core/XPackPlugin.java  | 5 ++---
 .../xpack/core/LocalStateCompositeXPackPlugin.java           | 1 -
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index bc861b3904f..b57b648b765 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -5,8 +5,8 @@
  */
 package org.elasticsearch.xpack.core;
 
-import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.Version;
@@ -49,7 +49,6 @@ import org.elasticsearch.persistent.PersistentTaskParams;
 import org.elasticsearch.plugins.EnginePlugin;
 import org.elasticsearch.plugins.ExtensiblePlugin;
 import org.elasticsearch.plugins.RepositoryPlugin;
-import org.elasticsearch.plugins.ScriptPlugin;
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestHandler;
@@ -86,7 +85,7 @@ import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
-public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, ExtensiblePlugin, RepositoryPlugin, EnginePlugin {
+public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin, RepositoryPlugin, EnginePlugin {
 
     private static Logger logger = LogManager.getLogger(XPackPlugin.class);
     private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java
index 1dd07a5df81..9b5414a6f83 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java
@@ -256,7 +256,6 @@ public class LocalStateCompositeXPackPlugin extends XPackPlugin implements Scrip
     @Override
     public List<ScriptContext<?>> getContexts() {
         List<ScriptContext<?>> contexts = new ArrayList<>();
-        contexts.addAll(super.getContexts());
         filterPlugins(ScriptPlugin.class).stream().forEach(p -> contexts.addAll(p.getContexts()));
         return contexts;
     }

From c3e0ae24d377f9fccd6edb326fee995dd2e6a8b4 Mon Sep 17 00:00:00 2001
From: Albert Zaharovits <albert.zaharovits@gmail.com>
Date: Mon, 22 Apr 2019 10:25:24 +0300
Subject: [PATCH 106/112] Fix role mapping DN field wildcards for users with
 NULL DNs (#41343)

The `DistinguishedNamePredicate`, used for matching users to role mapping
expressions, should handle users with null DNs. But it fails to do so (an
NPE bug) if the role mapping expression contains a Lucene regexp or a wildcard.

The fix simplifies `DistinguishedNamePredicate` so that it does not handle
null DNs at all, and instead uses the `ExpressionModel#NULL_PREDICATE` for the
DN field, just like for any other missing user field.
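
A small, self-contained sketch of the approach (hypothetical stand-ins, not
the actual security classes): a null user DN is mapped to a dedicated null
predicate up front, so the DN predicate itself never sees null and cannot NPE
on wildcard or regexp field values.

import java.util.function.Predicate;

public class NullDnPredicateDemo {
    static final Predicate<Object> NULL_PREDICATE = value -> value == null;

    static Predicate<Object> dnPredicate(String userDn) {
        if (userDn == null) {
            return NULL_PREDICATE; // the field is simply "missing"
        }
        // simplified stand-in for DistinguishedNamePredicate: exact,
        // case-insensitive match on the DN string
        return value -> value instanceof String && ((String) value).equalsIgnoreCase(userDn);
    }

    public static void main(String[] args) {
        System.out.println(dnPredicate(null).test("*,dc=example,dc=com"));  // false
        System.out.println(dnPredicate("cn=admin,dc=example,dc=com")
                .test("CN=Admin,DC=example,DC=com"));                       // true
    }
}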
---
 .../authc/support/UserRoleMapper.java         | 25 ++++++-----
 .../DistinguishedNamePredicateTests.java      | 10 ++---
 .../mapper/ExpressionRoleMappingTests.java    | 45 +++++++++++++++++--
 3 files changed, 58 insertions(+), 22 deletions(-)

diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java
index e55530bb5de..dbc32381061 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java
@@ -79,8 +79,12 @@ public interface UserRoleMapper {
         public ExpressionModel asModel() {
             final ExpressionModel model = new ExpressionModel();
             model.defineField("username", username);
-            model.defineField("dn", dn, new DistinguishedNamePredicate(dn));
+            if (dn != null) {
+                // null dn fields get the default NULL_PREDICATE
+                model.defineField("dn", dn, new DistinguishedNamePredicate(dn));
+            }
             model.defineField("groups", groups, groups.stream()
+                    .filter(group -> group != null)
                     .<Predicate<FieldExpression.FieldValue>>map(DistinguishedNamePredicate::new)
                     .reduce(Predicate::or)
                     .orElse(fieldValue -> false)
@@ -165,22 +169,19 @@ public interface UserRoleMapper {
         private final DN dn;
 
         public DistinguishedNamePredicate(String string) {
+            assert string != null : "DN string should not be null. Use the dedicated NULL_PREDICATE for every user null field.";
             this.string = string;
             this.dn = parseDn(string);
         }
 
         private static DN parseDn(String string) {
-            if (string == null) {
-                return null;
-            } else {
-                try {
-                    return new DN(string);
-                } catch (LDAPException | LDAPSDKUsageException e) {
-                    if (LOGGER.isTraceEnabled()) {
-                        LOGGER.trace(new ParameterizedMessage("failed to parse [{}] as a DN", string), e);
-                    }
-                    return null;
+            try {
+                return new DN(string);
+            } catch (LDAPException | LDAPSDKUsageException e) {
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace(new ParameterizedMessage("failed to parse [{}] as a DN", string), e);
                 }
+                return null;
             }
         }
 
@@ -240,7 +241,7 @@ public interface UserRoleMapper {
                 }
                 return testString.equalsIgnoreCase(dn.toNormalizedString());
             }
-            return string == null && fieldValue.getValue() == null;
+            return false;
         }
     }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java
index d04f0ad7f93..51ea82fc0e4 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java
@@ -49,27 +49,25 @@ public class DistinguishedNamePredicateTests extends ESTestCase {
     }
 
     public void testParsingMalformedInput() {
-        Predicate<FieldValue> predicate = new UserRoleMapper.DistinguishedNamePredicate(null);
-        assertPredicate(predicate, null, true);
-        assertPredicate(predicate, "", false);
-        assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8), false);
-
-        predicate = new UserRoleMapper.DistinguishedNamePredicate("");
+        Predicate<FieldValue> predicate = new UserRoleMapper.DistinguishedNamePredicate("");
         assertPredicate(predicate, null, false);
         assertPredicate(predicate, "", true);
         assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8), false);
+        assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8) + "*", false);
 
         predicate = new UserRoleMapper.DistinguishedNamePredicate("foo=");
         assertPredicate(predicate, null, false);
         assertPredicate(predicate, "foo", false);
         assertPredicate(predicate, "foo=", true);
         assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12), false);
+        assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12) + "*", false);
 
         predicate = new UserRoleMapper.DistinguishedNamePredicate("=bar");
         assertPredicate(predicate, null, false);
         assertPredicate(predicate, "bar", false);
         assertPredicate(predicate, "=bar", true);
         assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12), false);
+        assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12) + "*", false);
     }
 
     private void assertPredicate(Predicate<FieldValue> predicate, Object value, boolean expected) {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java
index 57db6005119..42652676d39 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java
@@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping;
 import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName;
 import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression;
+import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AnyExpression;
 import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression;
 import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
 import org.hamcrest.Matchers;
@@ -59,7 +60,7 @@ public class ExpressionRoleMappingTests extends ESTestCase {
             Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY));
     }
 
-    public void testParseValidJsonWithFixedRoleNames() throws Exception {
+    public void testValidExpressionWithFixedRoleNames() throws Exception {
         String json = "{"
             + "\"roles\": [  \"kibana_user\", \"sales\" ], "
             + "\"enabled\": true, "
@@ -69,7 +70,7 @@ public class ExpressionRoleMappingTests extends ESTestCase {
             + "    { \"except\": { \"field\": { \"metadata.active\" : false } } }"
             + "  ]}"
             + "}";
-        final ExpressionRoleMapping mapping = parse(json, "ldap_sales");
+        ExpressionRoleMapping mapping = parse(json, "ldap_sales");
         assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales"));
         assertThat(mapping.getExpression(), instanceOf(AllExpression.class));
 
@@ -96,12 +97,48 @@ public class ExpressionRoleMappingTests extends ESTestCase {
             Collections.emptyList(), Collections.singletonMap("active", true), realm
         );
 
+        final UserRoleMapper.UserData user4 = new UserRoleMapper.UserData(
+                "peter.null", null, Collections.emptyList(), Collections.singletonMap("active", true), realm
+        );
+
         assertThat(mapping.getExpression().match(user1a.asModel()), equalTo(true));
         assertThat(mapping.getExpression().match(user1b.asModel()), equalTo(true));
         assertThat(mapping.getExpression().match(user1c.asModel()), equalTo(true));
         assertThat(mapping.getExpression().match(user1d.asModel()), equalTo(true));
-        assertThat(mapping.getExpression().match(user2.asModel()), equalTo(false));
-        assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false));
+        assertThat(mapping.getExpression().match(user2.asModel()), equalTo(false)); // metadata.active == false
+        assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false)); // dn != ou=sales,dc=example,dc=com
+        assertThat(mapping.getExpression().match(user4.asModel()), equalTo(false)); // dn == null
+
+        // expression without dn
+        json = "{"
+                + "\"roles\": [  \"superuser\", \"system_admin\", \"admin\" ], "
+                + "\"enabled\": true, "
+                + "\"rules\": { "
+                + "  \"any\": [ "
+                + "    { \"field\": { \"username\" : \"tony.stark\" } }, "
+                + "    { \"field\": { \"groups\": \"cn=admins,dc=stark-enterprises,dc=com\" } }"
+                + "  ]}"
+                + "}";
+        mapping = parse(json, "stark_admin");
+            assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("superuser", "system_admin", "admin"));
+            assertThat(mapping.getExpression(), instanceOf(AnyExpression.class));
+
+        final UserRoleMapper.UserData userTony = new UserRoleMapper.UserData(
+                "tony.stark", null, Collections.singletonList("Audi R8 owners"), Collections.singletonMap("boss", true), realm
+        );
+        final UserRoleMapper.UserData userPepper = new UserRoleMapper.UserData(
+                "pepper.potts", null, Arrays.asList("marvel", "cn=admins,dc=stark-enterprises,dc=com"), null, realm
+        );
+        final UserRoleMapper.UserData userMax = new UserRoleMapper.UserData(
+                "max.rockatansky", null, Collections.singletonList("bronze"), Collections.singletonMap("mad", true), realm
+        );
+        final UserRoleMapper.UserData userFinn = new UserRoleMapper.UserData(
+                "finn.hackleberry", null, Arrays.asList("hacker", null), null, realm
+        );
+        assertThat(mapping.getExpression().match(userTony.asModel()), equalTo(true));
+        assertThat(mapping.getExpression().match(userPepper.asModel()), equalTo(true));
+        assertThat(mapping.getExpression().match(userMax.asModel()), equalTo(false));
+        assertThat(mapping.getExpression().match(userFinn.asModel()), equalTo(false));
     }
 
     public void testParseValidJsonWithTemplatedRoleNames() throws Exception {

From c7a74938049564cdf30028e7d52fa69c82f60a17 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Mon, 22 Apr 2019 05:26:36 -0700
Subject: [PATCH 107/112] [DOCS] Remove abbrevtitles for Asciidoctor migration
 (#41366)

---
 docs/reference/release-notes/highlights.asciidoc | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc
index 1e2cb4c231c..38501b4a795 100644
--- a/docs/reference/release-notes/highlights.asciidoc
+++ b/docs/reference/release-notes/highlights.asciidoc
@@ -1,8 +1,5 @@
 [[release-highlights]]
-= {es} Release highlights
-++++
-<titleabbrev>Release highlights</titleabbrev>
-++++
+= Release highlights
 
 [partintro]
 --

From d7cddf815740c8177802caf3eb40d598255aa66a Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Mon, 22 Apr 2019 05:39:04 -0700
Subject: [PATCH 108/112] [DOCS] Replace nested open block for Asciidoctor
 migration (#41168)

* [DOCS] Fix nested open blocks for Asciidoctor migration

* [DOCS] Reformat table to definitions
---
 docs/reference/index-modules.asciidoc | 29 +++++++++------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
index 7848a48fa58..96ccc9ab6a8 100644
--- a/docs/reference/index-modules.asciidoc
+++ b/docs/reference/index-modules.asciidoc
@@ -45,26 +45,17 @@ specific index module:
     part of the cluster.
 
 `index.shard.check_on_startup`::
-+
---
+
 Whether or not shards should be checked for corruption before opening. When
-corruption is detected, it will prevent the shard from being opened. Accepts:
-
-`false`::
-
-    (default) Don't check for corruption when opening a shard.
-
-`checksum`::
-
-    Check for physical corruption.
-
-`true`::
-
-    Check for both physical and logical corruption. This is much more
-    expensive in terms of CPU and memory usage.
-
-WARNING: Expert only. Checking shards may take a lot of time on large indices.
---
+corruption is detected, it will prevent the shard from being opened.
+Accepts:
+`false`::: (default) Don't check for corruption when opening a shard.
+`checksum`::: Check for physical corruption.
+`true`::: Check for both physical and logical corruption. This is much more
+expensive in terms of CPU and memory usage.
++
+WARNING: Expert only. Checking shards may take a lot of time on large
+indices.
 
 [[index-codec]] `index.codec`::
 

From b8d054e73be611d1022be6ef2d909b3430a1886c Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas <ikakavas@protonmail.com>
Date: Mon, 22 Apr 2019 15:38:41 +0300
Subject: [PATCH 109/112] OpenID Connect realm settings and rest API docs
 (#40740)

This commit adds the relevant docs for the OpenID Connect
realm settings and the REST APIs that are exposed.
---
 .../settings/security-settings.asciidoc       | 244 +++++++++++++++++-
 x-pack/docs/en/rest-api/security.asciidoc     |  12 +
 .../security/oidc-authenticate-api.asciidoc   |  68 +++++
 .../security/oidc-logout-api.asciidoc         |  53 ++++
 .../oidc-prepare-authentication-api.asciidoc  | 128 +++++++++
 5 files changed, 503 insertions(+), 2 deletions(-)
 create mode 100644 x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc
 create mode 100644 x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc
 create mode 100644 x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc

diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc
index b767b7869db..04cef7e199f 100644
--- a/docs/reference/settings/security-settings.asciidoc
+++ b/docs/reference/settings/security-settings.asciidoc
@@ -1074,8 +1074,7 @@ they cannot have individual passwords.
 
 If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` 
 is a URL using the `https` protocol), the following settings can be used to 
-configure SSL. If these are not specified, then the 
-<<ssl-tls-settings,default SSL settings>> are used.
+configure SSL.
 
 NOTE: These settings are not used for any purpose other than loading metadata 
 over https.
@@ -1204,6 +1203,247 @@ If this setting is used, then the Kerberos realm does not perform role mapping a
 instead loads the user from the listed realms.
 See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm]
 
+[[ref-oidc-settings]]
+[float]
+===== OpenID Connect realm settings
+
+In addition to the <<ref-realm-settings,settings that are valid for all realms>>, you
+can specify the following settings:
+
+`op.issuer`::
+A verifiable Identifier for your OpenID Connect Provider. An Issuer
+Identifier is usually a case sensitive URL using the https scheme that contains
+scheme, host, and optionally, port number and path components and no query or
+fragment components. The value for this setting should be provided by your OpenID
+Connect Provider.
+
+`op.authorization_endpoint`::
+The URL for the Authorization Endpoint at the
+OpenID Connect Provider. The value for this setting should be provided by your OpenID
+Connect Provider.
+
+`op.token_endpoint`::
+The URL for the Token Endpoint at the OpenID Connect Provider.
+The value for this setting should be provided by your OpenID Connect Provider.
+
+`op.userinfo_endpoint`::
+The URL for the User Info Endpoint at the OpenID Connect Provider.
+The value for this setting should be provided by your OpenID Connect Provider.
+
+`op.endsession_endpoint`::
+The URL for the End Session Endpoint at the OpenID Connect
+Provider. The value for this setting should be provided by your OpenID Connect Provider.
+
+`op.jwkset_path`::
+The path or URL to a JSON Web Key Set with the key material that the OpenID Connect
+Provider uses for signing tokens and claims responses.
+If a path is provided, then it is resolved relative to the {es} config
+directory.
+If a URL is provided, then it must be either a `file` URL or a `https` URL.
+{es} automatically caches the retrieved JWK set to avoid unnecessary HTTP
+requests but will attempt to refresh the JWK upon signature verification
+failure, as this might indicate that the OpenID Connect Provider has
+rotated the signing keys.
+
+File based resources are polled at a frequency determined by the global {es}
+`resource.reload.interval.high` setting, which defaults to 5 seconds.
+
+`rp.client_id`::
+The OAuth 2.0 Client Identifier that was assigned to {es} during registration
+at the OpenID Connect Provider
+
+`rp.client_secret`(<<secure-settings,Secure>>)::
+The OAuth 2.0 Client Secret that was assigned to {es} during registration
+at the OpenID Connect Provider
+
+`rp.redirect_uri`::
+The Redirect URI within {kib}. Typically this is the
+"api/security/v1/oidc" endpoint of your Kibana server. For example,
+`https://kibana.example.com/api/security/v1/oidc`.
+
+`rp.response_type`::
+OAuth 2.0 Response Type value that determines the authorization
+processing flow to be used. Can be `code` for authorization code grant flow,
+or one of `id_token`, `id_token token` for the implicit flow.
+
+`rp.signature_algorithm`::
+The signature algorithm that {es} uses to verify the signature of the ID
+tokens it receives from the OpenID Connect Provider. Defaults to `RS256`.
+
+`rp.requested_scopes`::
+The scope values that will be requested from the OpenID Connect Provider as
+part of the Authentication Request. Optional, defaults to `openid`.
+
+`rp.post_logout_redirect_uri`::
+The Redirect URI (usually within {kib}) that the OpenID Connect Provider
+should redirect the browser to after a successful Single Logout.
+
+`claims.principal`::
+The name of the OpenID Connect claim that contains the user's principal (username).
+
+`claims.groups`::
+The name of the OpenID Connect claim that contains the user's groups.
+
+`claims.name`::
+The name of the OpenID Connect claim that contains the user's full name.
+
+`claims.mail`::
+The name of the OpenID Connect claim that contains the user's email address.
+
+`claims.dn`::
+The name of the OpenID Connect claim that contains the user's X.509
+_Distinguished Name_.
+
+`claim_patterns.principal`::
+A Java regular expression that is matched against the OpenID Connect claim specified
+by `claims.principal` before it is applied to the user's _principal_ property.
+The attribute value must match the pattern and the value of the first
+_capturing group_ is used as the principal. For example, `^([^@]+)@example\\.com$`
+matches email addresses from the "example.com" domain and uses the local-part as
+the principal.
+
+`claim_patterns.groups`::
+As per `claim_patterns.principal`, but for the _group_ property.
+
+`claim_patterns.name`::
+As per `claim_patterns.principal`, but for the _name_ property.
+
+`claim_patterns.mail`::
+As per `claim_patterns.principal`, but for the _mail_ property.
+
+`claim_patterns.dn`::
+As per `claim_patterns.principal`, but for the _dn_ property.
+
+
+`allowed_clock_skew`::
+The maximum allowed clock skew to take into account when validating
+ID tokens with regard to their creation and expiration times.
+
+`populate_user_metadata`::
+Specifies whether to populate the {es} user's metadata with the values that are
+provided by the OpenID Connect claims. Defaults to `true`.
+
+`http.connect_timeout`::
+Controls the behavior of the http client used for back-channel communication to
+the OpenID Connect Provider endpoints. Specifies the timeout until a connection
+is established. A value of zero means the timeout is not used. Defaults to `5s`.
+
+`http.connection_read_timeout`::
+Controls the behavior of the http client used for back-channel communication to
+the OpenID Connect Provider endpoints. Specifies the timeout used when
+requesting a connection from the connection manager. Defaults to `5s`.
+
+`http.socket_timeout`::
+Controls the behavior of the http client used for back-channel communication to
+the OpenID Connect Provider endpoints. Specifies the socket timeout (`SO_TIMEOUT`),
+which is the maximum period of inactivity between two consecutive data packets
+while waiting for data. Defaults to `5s`.
+
+`http.max_connections`::
+Controls the behavior of the http client used for back-channel communication to
+the OpenID Connect Provider endpoints. Specifies the maximum number of
+connections allowed across all endpoints.
+
+`http.max_endpoint_connections`::
+Controls the behavior of the http client used for back-channel communication to
+the OpenID Connect Provider endpoints. Specifies the maximum number of
+connections allowed per endpoint.
+
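+As an illustration only, a minimal OpenID Connect realm definition in
+`elasticsearch.yml` might look like the following sketch. The realm name
+`oidc1`, all endpoint URLs, and the client identifier are placeholders that
+must be replaced with the values provided by your OpenID Connect Provider:
+
+[source, yaml]
+--------------------------------------------------
+xpack.security.authc.realms.oidc.oidc1:
+  order: 2
+  op.issuer: "https://op.example.org"
+  op.authorization_endpoint: "https://op.example.org/oauth2/v1/authorize"
+  op.token_endpoint: "https://op.example.org/oauth2/v1/token"
+  op.jwkset_path: "oidc/jwkset.json"
+  rp.client_id: "the_client_id"
+  rp.response_type: "code"
+  rp.redirect_uri: "https://kibana.example.com/api/security/v1/oidc"
+  claims.principal: "sub"
+--------------------------------------------------
+
+The corresponding `rp.client_secret` is a secure setting and would be added to
+the {es} keystore rather than to `elasticsearch.yml`.
+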
+[float]
+[[ref-oidc-ssl-settings]]
+===== OpenID Connect realm SSL settings
+
+The following settings can be used to configure SSL for all outgoing http connections
+to the OpenID Connect Provider endpoints.
+
+NOTE: These settings are _only_ used for the back-channel communication between
+{es} and the OpenID Connect Provider.
+
+`ssl.key`::
+Specifies the path to the PEM encoded private key to use for http client
+authentication (if required). `ssl.key` and `ssl.keystore.path` cannot be used
+at the same time.
+
+`ssl.key_passphrase`::
+Specifies the passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
+encrypted. Cannot be used with `ssl.secure_key_passphrase`.
+
+`ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
+Specifies the passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
+encrypted. Cannot be used with `ssl.key_passphrase`.
+
+`ssl.certificate`::
+Specifies the path to the PEM encoded certificate (or certificate chain) that is associated
+with the key (`ssl.key`). This setting can be used only if `ssl.key` is set.
+
+`ssl.certificate_authorities`::
+Specifies the paths to the PEM encoded certificate authority certificates that should be
+trusted. `ssl.certificate_authorities` and `ssl.truststore.path` cannot be
+used at the same time.
+
+`ssl.keystore.path`::
+Specifies the path to the keystore that contains a private key and certificate.
+Must be either a Java Keystore (jks) or a PKCS#12 file.
+`ssl.key` and `ssl.keystore.path` cannot be used at the same time.
+
+`ssl.keystore.type`::
+The type of the keystore (`ssl.keystore.path`). Must be either `jks` or `PKCS12`.
+If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults
+to `PKCS12`. Otherwise, it defaults to `jks`.
+
+`ssl.keystore.password`::
+The password to the keystore (`ssl.keystore.path`). This setting cannot be used
+with `ssl.keystore.secure_password`.
+
+`ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
+The password to the keystore (`ssl.keystore.path`).
+This setting cannot be used with `ssl.keystore.password`.
+
+`ssl.keystore.key_password`::
+The password for the key in the keystore (`ssl.keystore.path`).
+Defaults to the keystore password. This setting cannot be used with
+`ssl.keystore.secure_key_password`.
+
+`ssl.keystore.secure_key_password` (<<secure-settings,Secure>>)::
+The password for the key in the keystore (`ssl.keystore.path`).
+Defaults to the keystore password. This setting cannot be used with
+`ssl.keystore.key_password`.
+
+`ssl.truststore.path`::
+The path to the keystore that contains the certificates to trust.
+Must be either a Java Keystore (jks) or a PKCS#12 file.
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the
+same time.
+
+`ssl.truststore.type`::
+The type of the truststore (`ssl.truststore.path`). Must be either `jks` or
+`PKCS12`. If the truststore path ends in ".p12", ".pfx" or "pkcs12", this setting
+defaults to `PKCS12`. Otherwise, it defaults to `jks`.
+
+`ssl.truststore.password`::
+The password to the truststore (`ssl.truststore.path`). This setting cannot be
+used with `ssl.truststore.secure_password`.
+
+`ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
+The password to the truststore (`ssl.truststore.path`). This setting cannot be
+used with `ssl.truststore.password`.
+
+`ssl.verification_mode`::
+One of `full` (verify the hostname and the certificate path), `certificate` (verify the
+certificate path, but not the hostname) or `none` (perform no verification).
+Defaults to `full`.
++
+See <<ssl-tls-settings,`ssl.verification_mode`>> for a more detailed explanation of these values.
+
+`ssl.supported_protocols`::
+Specifies the supported protocols for TLS/SSL. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if
+the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`.
+
+`ssl.cipher_suites`::
+Specifies the cipher suites that should be supported.
+
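+For example, a sketch of the realm settings that trust a specific certificate
+authority for these back-channel connections could look like the following
+(the realm name `oidc1` and the file name `op-ca.crt` are placeholders for
+illustration):
+
+[source, yaml]
+--------------------------------------------------
+xpack.security.authc.realms.oidc.oidc1:
+  ssl.verification_mode: full
+  ssl.certificate_authorities: [ "op-ca.crt" ]
+--------------------------------------------------
+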
 [float]
 [[load-balancing]]
 ===== Load balancing and failover
diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc
index 7e14a6a0ee9..c04bae90801 100644
--- a/x-pack/docs/en/rest-api/security.asciidoc
+++ b/x-pack/docs/en/rest-api/security.asciidoc
@@ -76,6 +76,15 @@ native realm:
 * <<security-api-enable-user,Enable users>>
 * <<security-api-get-user,Get users>>
 
+=== OpenID Connect
+
+You can use the following APIs to authenticate users against an OpenID Connect
+authentication realm:
+
+* <<security-api-oidc-prepare-authentication, Prepare an authentication request>>
+* <<security-api-oidc-authenticate, Submit an authentication response>>
+* <<security-api-oidc-logout, Logout an authenticated user>>
+
 
 include::security/authenticate.asciidoc[]
 include::security/change-password.asciidoc[]
@@ -102,3 +111,6 @@ include::security/has-privileges.asciidoc[]
 include::security/invalidate-api-keys.asciidoc[]
 include::security/invalidate-tokens.asciidoc[]
 include::security/ssl.asciidoc[]
+include::security/oidc-prepare-authentication-api.asciidoc[]
+include::security/oidc-authenticate-api.asciidoc[]
+include::security/oidc-logout-api.asciidoc[]
diff --git a/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc
new file mode 100644
index 00000000000..0efb2b23145
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc
@@ -0,0 +1,68 @@
+[role="xpack"]
+[[security-api-oidc-authenticate]]
+
+=== OpenID Connect Authenticate API
+
+Submits the response to an OAuth 2.0 authentication request for consumption by {es}. Upon successful validation, {es}
+responds with an {es} internal access token and a refresh token that can subsequently be used for authentication. In
+effect, this API endpoint exchanges successful OpenID Connect authentication responses for {es} access and refresh
+tokens, which are then used for authentication.
+
+{es} exposes all the necessary OpenID Connect related functionality via the OpenID Connect APIs. These APIs
+are used internally by {kib} in order to provide OpenID Connect based authentication, but can also be used by
+other custom web applications or clients. See also
+<<security-api-oidc-prepare-authentication,OpenID Connect Prepare Authentication API>> and
+<<security-api-oidc-logout,OpenID Connect Logout API>>.
+
+==== Request
+
+`POST /_security/oidc/authenticate`
+
+==== Request Body
+
+`redirect_uri`::
+The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request, after a
+successful authentication. This URL is expected to be provided as-is (URL encoded), taken from the body of the response
+or as the value of a `Location` header in the response from the OpenID Connect Provider.
+
+`state`::
+String value used to maintain state between the authentication request and the response. This value needs to be the same
+as the one that was provided to the call to `/_security/oidc/prepare` earlier, or the one that was generated by {es}
+and included in the response to that call.
+
+`nonce`::
+String value used to associate a Client session with an ID Token, and to mitigate replay attacks. This value needs to be
+the same as the one that was provided to the call to `/_security/oidc/prepare` earlier, or the one that was generated by {es}
+and included in the response to that call.
+
+==== Examples
+
+The following example request exchanges the response that was returned from the OpenID Connect Provider after a successful
+authentication, for an {es} access token and refresh token to be used in subsequent requests. This example is from an
+authentication that uses the authorization code grant flow.
+
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/authenticate
+{
+  "redirect_uri" : "https://oidc-kibana.elastic.co:5603/api/security/v1/oidc?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
+  "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
+  "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:These are properly tested in the OpenIDConnectIT suite]
+
+The following example output contains the access token that was generated in response, the amount of time (in
+seconds) after which the token expires, the type, and the refresh token:
+
+[source,js]
+--------------------------------------------------
+{
+  "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",
+  "type" : "Bearer",
+  "expires_in" : 1200,
+  "refresh_token": "vLBPvmAB6KvwvJZr27cS"
+}
+--------------------------------------------------
+// NOTCONSOLE
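+
+The access token that is returned can then be presented as a bearer token in the
+`Authorization` header of subsequent requests to {es}. The following is an
+illustrative sketch only, assuming {es} listens on `localhost:9200` and reusing
+the test token value from the example above:
+
+[source,sh]
+--------------------------------------------------
+curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" \
+     "http://localhost:9200/_security/_authenticate"
+--------------------------------------------------
+// NOTCONSOLE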
diff --git a/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc
new file mode 100644
index 00000000000..6f5288a135f
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc
@@ -0,0 +1,53 @@
+[role="xpack"]
+[[security-api-oidc-logout]]
+
+=== OpenID Connect Logout API
+
+Submits a request to invalidate a refresh token and an access token that were generated as a response to a call to
+`/_security/oidc/authenticate`. If the OpenID Connect authentication realm in {es} is configured accordingly, the
+response to this call contains a URI pointing to the End Session Endpoint of the OpenID Connect Provider, so that
+Single Logout can be performed.
+
+{es} exposes all the necessary OpenID Connect related functionality via the OpenID Connect APIs. These APIs
+are used internally by {kib} in order to provide OpenID Connect based authentication, but can also be used by
+other custom web applications or clients. See also <<security-api-oidc-authenticate,OpenID Connect Authenticate API>>
+and <<security-api-oidc-prepare-authentication,OpenID Connect Prepare Authentication API>>.
+
+==== Request
+
+`POST /_security/oidc/logout`
+
+==== Request Body
+
+`access_token`::
+The value of the access token to be invalidated as part of the logout.
+
+`refresh_token`::
+The value of the refresh token to be invalidated as part of the logout. (Optional)
+
+
+==== Examples
+
+The following example performs logout:
+
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/logout
+{
+  "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",
+  "refresh_token": "vLBPvmAB6KvwvJZr27cS"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:These are properly tested in the OpenIDConnectIT suite]
+
+The following example response contains the URI pointing to the End Session Endpoint of the
+OpenID Connect Provider, with all the parameters of the Logout Request as HTTP GET parameters:
+
+[source,js]
+--------------------------------------------------
+{
+  "redirect" : "https://op-provider.org/logout?id_token_hint=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c&post_logout_redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%2Floggedout&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO"
+}
+--------------------------------------------------
+// NOTCONSOLE
\ No newline at end of file
diff --git a/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc
new file mode 100644
index 00000000000..aeb400ce97e
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc
@@ -0,0 +1,128 @@
+[role="xpack"]
+[[security-api-oidc-prepare-authentication]]
+
+=== OpenID Connect Prepare Authentication API
+
+Creates an OAuth 2.0 authentication request as a URL string, based on the configuration of the respective
+OpenID Connect authentication realm in {es}. The response of this API is a URL pointing to the Authorization Endpoint
+of the configured OpenID Connect Provider and can be used to redirect the user's browser in order to continue
+the authentication process.
+
+{es} exposes all the necessary OpenID Connect related functionality via the OpenID Connect APIs. These APIs
+are used internally by {kib} in order to provide OpenID Connect based authentication, but can also be used by
+other custom web applications or clients. See also <<security-api-oidc-authenticate,OpenID Connect Authenticate API>>
+and <<security-api-oidc-logout,OpenID Connect Logout API>>.
+
+==== Request
+
+`POST /_security/oidc/prepare`
+
+
+==== Request Body
+
+The following parameters can be specified in the body of the request:
+
+`realm`::
+The name of the OpenID Connect realm in {es} whose configuration should be used to
+generate the authentication request. Cannot be specified when `issuer` is specified.
+
+`state`::
+String value used to maintain state between the authentication request and the response, typically used
+as a Cross-Site Request Forgery mitigation. If the caller of the API doesn't provide a value, {es} will
+generate one with sufficient entropy itself and return it in the response.
+
+`nonce`::
+String value used to associate a Client session with an ID Token, and to mitigate replay attacks.
+If the caller of the API doesn't provide a value, {es} will generate one with sufficient entropy itself
+and return it in the response.
+
+`issuer`::
+In the case of a 3rd party initiated Single Sign On, this is the Issuer Identifier for the OP that the RP is
+to send the Authentication Request to. Cannot be specified when `realm` is specified.
+
+`login_hint`::
+In the case of a 3rd party initiated Single Sign On, a string value to be included in the authentication
+request as the `login_hint` parameter. This parameter is not valid when `realm` is specified.
+
+
+==== Examples
+
+The following example generates an authentication request for the OpenID Connect realm `oidc1`:
+
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/prepare
+{
+  "realm" : "oidc1"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:These are properly tested in the OpenIDConnectIT suite]
+
+The following example response contains the URI pointing to the Authorization Endpoint of the
+OpenID Connect Provider, with all the parameters of the Authentication Request as HTTP GET parameters:
+
+[source,js]
+--------------------------------------------------
+{
+  "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=0o43gasov3TxMWJOt839",
+  "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
+  "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM"
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+The following example generates an authentication request for the OpenID Connect realm `oidc1`, where the
+values for the state and the nonce have been generated by the client:
+
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/prepare
+{
+  "realm" : "oidc1",
+  "state" : "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO",
+  "nonce" : "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:These are properly tested in the OpenIDConnectIT suite]
+
+The following example response contains the URI pointing to the Authorization Endpoint of the
+OpenID Connect Provider, with all the parameters of the Authentication Request as HTTP GET parameters:
+
+[source,js]
+--------------------------------------------------
+{
+  "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=0o43gasov3TxMWJOt839",
+  "state" : "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO",
+  "nonce" : "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5"
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+The following example generates an authentication request for a 3rd party initiated Single Sign On, specifying the
+issuer that should be used for matching the appropriate OpenID Connect Authentication realm:
+
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/prepare
+{
+  "issuer" : "https://op-issuer.org:8800",
+  "login_hint": "this_is_an_opaque_string"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:These are properly tested in the OpenIDConnectIT suite]
+
+The following example response contains the URI pointing to the Authorization Endpoint of the
+OpenID Connect Provider, with all the parameters of the Authentication Request as HTTP GET parameters:
+
+[source,js]
+--------------------------------------------------
+{
+  "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=0o43gasov3TxMWJOt839&login_hint=this_is_an_opaque_string",
+  "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
+  "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM"
+}
+--------------------------------------------------
+// NOTCONSOLE
\ No newline at end of file

From 490ad2af46ac47a46a3212edb96a86ec8a1cc262 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Mon, 22 Apr 2019 06:07:53 -0700
Subject: [PATCH 110/112] [DOCS] Remove inline callouts for Asciidoctor
 migration (#41266)

---
 docs/reference/sql/endpoints/jdbc.asciidoc | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc
index 7b1169d34d3..9014d3f5719 100644
--- a/docs/reference/sql/endpoints/jdbc.asciidoc
+++ b/docs/reference/sql/endpoints/jdbc.asciidoc
@@ -51,14 +51,21 @@ Once registered, the driver understands the following syntax as an URL:
 
 ["source","text",subs="attributes"]
 ----
-jdbc:es://<1>[[http|https]://]*<2>[host[:port]]*<3>/[prefix]*<4>[?[option=value]&<5>]*
+jdbc:es://[[http|https]://]*[host[:port]]*/[prefix]*[?[option=value]&]*
 ----
+`jdbc:es://`:: Prefix. Mandatory.
 
-<1> `jdbc:es://` prefix. Mandatory.
-<2> type of HTTP connection to make - `http` (default) or `https`. Optional.
-<3> host (`localhost` by default) and port (`9200` by default). Optional.
-<4> prefix (empty by default). Typically used when hosting {es} under a certain path. Optional.
-<5> Properties for the JDBC driver. Empty by default. Optional.
+`[[http|https]://]`:: Type of HTTP connection to make. Possible values are
+`http` (default) or `https`. Optional.
+
+`[host[:port]]`:: Host (`localhost` by default) and port (`9200` by default).
+Optional.
+
+`[prefix]`:: Prefix (empty by default). Typically used when hosting {es} under
+a certain path. Optional.
+
+`[option=value]`:: Properties for the JDBC driver. Empty by default.
+Optional.
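+
+For instance, a URL that connects over `https` to a host named `server` on
+port `9200` and sets two driver properties might look like the following
+sketch (the host name and the property values are placeholders for
+illustration):
+
+["source","text",subs="attributes"]
+----
+jdbc:es://https://server:9200/?timezone=UTC&page.size=250
+----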
 
 The driver recognizes the following properties:
 

From 3c60f967af6096026b9a3edb22083de8cf777d62 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Mon, 22 Apr 2019 06:22:41 -0700
Subject: [PATCH 111/112] [DOCS] Remove inline callouts in SQL Command docs for
 Asciidoctor migration (#41276)

---
 .../sql/language/syntax/commands/describe-table.asciidoc  | 8 ++++++--
 .../sql/language/syntax/commands/show-columns.asciidoc    | 4 +++-
 .../sql/language/syntax/commands/show-functions.asciidoc  | 2 +-
 .../sql/language/syntax/commands/show-tables.asciidoc     | 4 +++-
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc
index 81f2d386c26..da02f1fa238 100644
--- a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc
+++ b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc
@@ -6,7 +6,9 @@
 .Synopsis
 [source, sql]
 ----
-DESCRIBE [table identifier<1> | [LIKE pattern<2>]]
+DESCRIBE
+    [table identifier | <1>
+    [LIKE pattern]]     <2>
 ----
 
 <1> single table identifier or double quoted es multi index
@@ -16,7 +18,9 @@ or
 
 [source, sql]
 ----
-DESC [table identifier<1>|[LIKE pattern<2>]]
+DESC 
+    [table identifier | <1>
+    [LIKE pattern]]     <2>
 ----
 
 <1> single table identifier or double quoted es multi index
diff --git a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc
index 6b6acc0c4ac..b21c02358e5 100644
--- a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc
+++ b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc
@@ -6,7 +6,9 @@
 .Synopsis
 [source, sql]
 ----
-SHOW COLUMNS [ FROM | IN ]? [ table identifier<1> | [ LIKE pattern<2> ] ]
+SHOW COLUMNS [ FROM | IN ]?
+    [table identifier | <1>
+    [LIKE pattern] ]    <2>
 ----
 
 <1> single table identifier or double quoted es multi index
diff --git a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc
index e12c4efed59..47c000e81d9 100644
--- a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc
+++ b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc
@@ -6,7 +6,7 @@
 .Synopsis
 [source, sql]
 ----
-SHOW FUNCTIONS [ LIKE pattern<1>? ]?
+SHOW FUNCTIONS [LIKE pattern?]? <1>
 ----
 
 <1> SQL match pattern
diff --git a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc
index 28b5ad4c75d..691d328aa4b 100644
--- a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc
+++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc
@@ -6,7 +6,9 @@
 .Synopsis
 [source, sql]
 ----
-SHOW TABLES [ table identifier<1> | [ LIKE pattern<2> ] ]?
+SHOW TABLES
+    [table identifier | <1>
+    [LIKE pattern ]]?   <2>
 ----
 
 <1> single table identifier or double quoted es multi index

From d2a418152dbf6b66145db7b94e78fd0869980752 Mon Sep 17 00:00:00 2001
From: James Rodewig <james.rodewig@elastic.co>
Date: Mon, 22 Apr 2019 06:33:55 -0700
Subject: [PATCH 112/112] [DOCS] Remove inline callouts for Asciidoctor
 migration (#41309)

---
 docs/reference/sql/functions/aggs.asciidoc    | 42 +++++++------
 .../sql/functions/date-time.asciidoc          | 38 ++++++------
 .../reference/sql/functions/grouping.asciidoc |  9 ++-
 .../sql/functions/like-rlike.asciidoc         |  6 +-
 docs/reference/sql/functions/math.asciidoc    | 60 +++++++++++--------
 docs/reference/sql/functions/search.asciidoc  |  9 ++-
 6 files changed, 97 insertions(+), 67 deletions(-)

diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc
index cc0f06cb3bb..2beef2c65ea 100644
--- a/docs/reference/sql/functions/aggs.asciidoc
+++ b/docs/reference/sql/functions/aggs.asciidoc
@@ -16,7 +16,7 @@ Functions for computing a _single_ result from a set of input values.
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-AVG(numeric_field<1>)
+AVG(numeric_field) <1>
 --------------------------------------------------
 
 *Input*:
@@ -40,7 +40,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggAvg]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-COUNT(expression<1>)
+COUNT(expression) <1>
 --------------------------------------------------
 
 *Input*:
@@ -70,7 +70,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountStar]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-COUNT(ALL field_name<1>)
+COUNT(ALL field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -95,7 +95,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountAll]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-COUNT(DISTINCT field_name<1>)
+COUNT(DISTINCT field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -119,7 +119,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountDistinct]
 .Synopsis:
 [source, sql]
 ----------------------------------------------
-FIRST(field_name<1>[, ordering_field_name]<2>)
+FIRST(
+    field_name               <1>
+    [, ordering_field_name]) <2>
 ----------------------------------------------
 
 *Input*:
@@ -214,7 +216,9 @@ the field is also <<before-enabling-fielddata,saved as a keyword>>.
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LAST(field_name<1>[, ordering_field_name]<2>)
+LAST(
+    field_name               <1>
+    [, ordering_field_name]) <2>
 --------------------------------------------------
 
 *Input*:
@@ -309,7 +313,7 @@ the field is also <<before-enabling-fielddata,`saved as a keyword`>>.
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MAX(field_name<1>)
+MAX(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -337,7 +341,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMax]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MIN(field_name<1>)
+MIN(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -365,7 +369,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMin]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SUM(field_name<1>)
+SUM(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -393,7 +397,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSum]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-KURTOSIS(field_name<1>)
+KURTOSIS(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -417,7 +421,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggKurtosis]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MAD(field_name<1>)
+MAD(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -441,7 +445,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMad]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-PERCENTILE(field_name<1>, numeric_exp<2>)
+PERCENTILE(
+    field_name,  <1>
+    numeric_exp) <2>
 --------------------------------------------------
 
 *Input*:
@@ -467,7 +473,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentile]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-PERCENTILE_RANK(field_name<1>, numeric_exp<2>)
+PERCENTILE_RANK(
+    field_name,  <1>
+    numeric_exp) <2>
 --------------------------------------------------
 
 *Input*:
@@ -493,7 +501,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentileRank]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SKEWNESS(field_name<1>)
+SKEWNESS(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -517,7 +525,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSkewness]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-STDDEV_POP(field_name<1>)
+STDDEV_POP(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -541,7 +549,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevPop]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SUM_OF_SQUARES(field_name<1>)
+SUM_OF_SQUARES(field_name) <1>
 --------------------------------------------------
 
 *Input*:
@@ -565,7 +573,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSumOfSquares]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-VAR_POP(field_name<1>)
+VAR_POP(field_name) <1>
 --------------------------------------------------
 
 *Input*:
diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc
index 782ea15f627..d9d5e7bcf14 100644
--- a/docs/reference/sql/functions/date-time.asciidoc
+++ b/docs/reference/sql/functions/date-time.asciidoc
@@ -146,7 +146,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday]
 [source, sql]
 --------------------------------------------------
 CURRENT_TIME
-CURRENT_TIME([precision <1>])
+CURRENT_TIME([precision]) <1>
 CURTIME
 --------------------------------------------------
 
@@ -203,7 +203,7 @@ function as the maximum number of second fractional digits returned is 3 (millis
 [source, sql]
 --------------------------------------------------
 CURRENT_TIMESTAMP
-CURRENT_TIMESTAMP([precision <1>])
+CURRENT_TIMESTAMP([precision]) <1>
 --------------------------------------------------
 
 *Input*:
@@ -254,7 +254,7 @@ function as the maximum number of second fractional digits returned is 3 (millis
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-DAY_OF_MONTH(datetime_exp<1>)
+DAY_OF_MONTH(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -278,7 +278,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfMonth]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-DAY_OF_WEEK(datetime_exp<1>)
+DAY_OF_WEEK(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -302,7 +302,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfWeek]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-DAY_OF_YEAR(datetime_exp<1>)
+DAY_OF_YEAR(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -326,7 +326,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-DAY_NAME(datetime_exp<1>)
+DAY_NAME(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -350,7 +350,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayName]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-HOUR_OF_DAY(datetime_exp<1>)
+HOUR_OF_DAY(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -374,7 +374,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[hourOfDay]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ISO_DAY_OF_WEEK(datetime_exp<1>)
+ISO_DAY_OF_WEEK(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -399,7 +399,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[isoDayOfWeek]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ISO_WEEK_OF_YEAR(datetime_exp<1>)
+ISO_WEEK_OF_YEAR(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -424,7 +424,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[isoWeekOfYear]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MINUTE_OF_DAY(datetime_exp<1>)
+MINUTE_OF_DAY(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -448,7 +448,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfDay]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MINUTE_OF_HOUR(datetime_exp<1>)
+MINUTE_OF_HOUR(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -472,7 +472,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfHour]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MONTH(datetime_exp<1>)
+MONTH(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -496,7 +496,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[monthOfYear]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MONTH_NAME(datetime_exp<1>)
+MONTH_NAME(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -552,7 +552,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SECOND_OF_MINUTE(datetime_exp<1>)
+SECOND_OF_MINUTE(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -576,7 +576,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[secondOfMinute]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-QUARTER(datetime_exp<1>)
+QUARTER(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -632,7 +632,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-WEEK_OF_YEAR(datetime_exp<1>)
+WEEK_OF_YEAR(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -656,7 +656,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[weekOfYear]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-YEAR(datetime_exp<1>)
+YEAR(datetime_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -680,7 +680,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[year]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-EXTRACT(datetime_function<1> FROM datetime_exp<2>)
+EXTRACT(
+    datetime_function  <1>
+    FROM datetime_exp) <2>
 --------------------------------------------------
 
 *Input*:
diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc
index 742f072dbd0..0a498a1aace 100644
--- a/docs/reference/sql/functions/grouping.asciidoc
+++ b/docs/reference/sql/functions/grouping.asciidoc
@@ -12,8 +12,13 @@ as part of the <<sql-syntax-group-by, grouping>>.
 .Synopsis:
 [source, sql]
 ----
-HISTOGRAM(numeric_exp<1>, numeric_interval<2>)
-HISTOGRAM(date_exp<3>, date_time_interval<4>)
+HISTOGRAM(
+    numeric_exp,        <1>
+    numeric_interval)   <2>
+
+HISTOGRAM(
+    date_exp,           <3>
+    date_time_interval) <4>
 ----
 
 *Input*:
diff --git a/docs/reference/sql/functions/like-rlike.asciidoc b/docs/reference/sql/functions/like-rlike.asciidoc
index c38f62ae7d7..73212bc1135 100644
--- a/docs/reference/sql/functions/like-rlike.asciidoc
+++ b/docs/reference/sql/functions/like-rlike.asciidoc
@@ -20,7 +20,8 @@ or has an exact sub-field, it will use it as is, or it will automatically use th
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-expression<1> LIKE constant_exp<2>
+expression        <1>
+LIKE constant_exp <2>
 --------------------------------------------------
 
 <1> typically a field, or a constant expression
@@ -59,7 +60,8 @@ IMPORTANT: Even though `LIKE` is a valid option when searching or filtering in {
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-expression<1> RLIKE constant_exp<2>
+expression         <1>
+RLIKE constant_exp <2>
 --------------------------------------------------
 
 <1> typically a field, or a constant expression
diff --git a/docs/reference/sql/functions/math.asciidoc b/docs/reference/sql/functions/math.asciidoc
index f77b6975604..ebef8a305bc 100644
--- a/docs/reference/sql/functions/math.asciidoc
+++ b/docs/reference/sql/functions/math.asciidoc
@@ -16,7 +16,7 @@ to be numeric.
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ABS(numeric_exp<1>)
+ABS(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -40,7 +40,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[abs]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-CBRT(numeric_exp<1>)
+CBRT(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -64,7 +64,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCbrtWithNegativeValue]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-CEIL(numeric_exp<1>)
+CEIL(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -110,7 +110,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathEulersNumber]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-EXP(numeric_exp<1>)
+EXP(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -134,7 +134,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpInline]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-EXPM1(numeric_exp<1>)
+EXPM1(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -158,7 +158,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpm1Inline]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-FLOOR(numeric_exp<1>)
+FLOOR(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -182,7 +182,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineFloor]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LOG(numeric_exp<1>)
+LOG(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -206,7 +206,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineLog]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-LOG10(numeric_exp<1>)
+LOG10(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -252,7 +252,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathPINumber]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-POWER(numeric_exp<1>, integer_exp<2>)
+POWER(
+    numeric_exp, <1>
+    integer_exp) <2>
 --------------------------------------------------
 
 *Input*:
@@ -282,7 +284,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlinePowerNegative]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-RANDOM(seed<1>)
+RANDOM(seed) <1>
 --------------------------------------------------
 
 *Input*:
@@ -306,7 +308,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathRandom]
 .Synopsis:
 [source, sql]
 ----
-ROUND(numeric_exp<1>[, integer_exp<2>])
+ROUND(
+    numeric_exp      <1>
+    [, integer_exp]) <2>
 ----
 *Input*:
 
@@ -337,7 +341,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathRoundWithNegativeParameter]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SIGN(numeric_exp<1>)
+SIGN(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -362,7 +366,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSign]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SQRT(numeric_exp<1>)
+SQRT(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -386,7 +390,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSqrt]
 .Synopsis:
 [source, sql]
 ----
-TRUNCATE(numeric_exp<1>[, integer_exp<2>])
+TRUNCATE(
+    numeric_exp      <1>
+    [, integer_exp]) <2>
 ----
 *Input*:
 
@@ -421,7 +427,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathTruncateWithNegativeParameter
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ACOS(numeric_exp<1>)
+ACOS(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -445,7 +451,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAcos]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ASIN(numeric_exp<1>)
+ASIN(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -469,7 +475,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAsin]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ATAN(numeric_exp<1>)
+ATAN(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -493,7 +499,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-ATAN2(ordinate<1>, abscisa<2>)
+ATAN2(
+    ordinate, <1>
+    abscisa)  <2>
 --------------------------------------------------
 
 *Input*:
@@ -518,7 +526,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan2]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-COS(numeric_exp<1>)
+COS(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -542,7 +550,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosine]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-COSH(numeric_exp<1>)
+COSH(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -566,7 +574,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosh]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-COT(numeric_exp<1>)
+COT(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -590,7 +598,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCotangent]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-DEGREES(numeric_exp<1>)
+DEGREES(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -615,7 +623,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineDegrees]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-RADIANS(numeric_exp<1>)
+RADIANS(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -640,7 +648,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineRadians]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SIN(numeric_exp<1>)
+SIN(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -664,7 +672,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSine]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-SINH(numeric_exp<1>)
+SINH(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
@@ -688,7 +696,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSinh]
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-TAN(numeric_exp<1>)
+TAN(numeric_exp) <1>
 --------------------------------------------------
 
 *Input*:
diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc
index 0534271caa9..6990f6669d6 100644
--- a/docs/reference/sql/functions/search.asciidoc
+++ b/docs/reference/sql/functions/search.asciidoc
@@ -14,7 +14,10 @@ such as `0` or `NULL`.
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-MATCH(field_exp<1>, constant_exp<2>[, options]<3>)
+MATCH(
+    field_exp,   <1>
+    constant_exp <2>
+    [, options]) <3>
 --------------------------------------------------
 
 *Input*:
@@ -75,7 +78,9 @@ NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for t
 .Synopsis:
 [source, sql]
 --------------------------------------------------
-QUERY(constant_exp<1>[, options]<2>)
+QUERY(
+    constant_exp <1>
+    [, options]) <2>
 --------------------------------------------------
 
 *Input*: